diff options
author | Stefan Metzmacher <metze@samba.org> | 2020-05-14 13:32:47 +0200 |
---|---|---|
committer | Volker Lendecke <vl@samba.org> | 2020-07-08 11:02:39 +0000 |
commit | 4faab2a77a66497ea18ae6df8fee28e27dab1b4a (patch) | |
tree | cbc1775eec899cfd6c5a5fcb9469e2044d89b72a | |
parent | a618776ac4e097e27e3c766d996323b1d86a7241 (diff) | |
download | samba-4faab2a77a66497ea18ae6df8fee28e27dab1b4a.tar.gz |
s3:dbwrap_watch: avoid recursion into dbwrap_do_locked() from dbwrap_watched_do_locked_{storev,delete}()
This avoids a lot of overhead!
Using smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 500000
under valgrind --tool=callgrind...
This change replaces this:
6,877,542,529 PROGRAM TOTALS
590,000,773 lib/tdb/common/lock.c:tdb_lock_list
479,000,608 lib/tdb/common/lock.c:tdb_unlock
446,500,532 lib/tdb/common/io.c:tdb_read
364,000,824 lib/tdb/common/hash.c:tdb_jenkins_hash
285,000,532 lib/tdb/common/io.c:tdb_write
262,054,669 /x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_avx_unaligned_erms
206,500,496 lib/tdb/common/mutex.c:tdb_mutex_lock
193,000,176 lib/tdb/common/tdb.c:tdb_find
160,000,256 lib/talloc/talloc.c:_talloc_get_type_abort
148,500,297 lib/tdb/common/tdb.c:tdb_storev
140,000,196 lib/tdb/common/lock.c:tdb_lock
130,000,858 lib/util/debug.c:debuglevel_get_class
128,003,722 lib/talloc/talloc.c:_talloc_free
128,000,118 lib/tdb/common/tdb.c:tdb_parse_record
126,000,576 lib/tdb/common/lock.c:tdb_brlock.part.3
121,000,272 lib/tdb/common/mutex.c:tdb_mutex_unlock
118,000,225 /nptl/pthread_mutex_lock.c:__pthread_mutex_lock_full
112,750,222 lib/tdb/common/freelist.c:tdb_allocate_from_freelist
108,500,168 lib/tdb/common/io.c:tdb_ofs_read
102,500,000 lib/tdb/common/io.c:tdb_parse_data
by this:
5,706,522,398 PROGRAM TOTALS
434,000,617 lib/tdb/common/lock.c:tdb_lock_list
389,500,494 lib/tdb/common/io.c:tdb_read
359,000,488 lib/tdb/common/lock.c:tdb_unlock
285,000,532 lib/tdb/common/io.c:tdb_write
237,554,655 /x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_avx_unaligned_erms
208,000,668 lib/tdb/common/hash.c:tdb_jenkins_hash
206,500,496 lib/tdb/common/mutex.c:tdb_mutex_lock
160,000,256 lib/talloc/talloc.c:_talloc_get_type_abort
148,500,297 lib/tdb/common/tdb.c:tdb_storev
136,000,132 lib/tdb/common/tdb.c:tdb_find
130,000,858 lib/util/debug.c:debuglevel_get_class
126,000,576 lib/tdb/common/lock.c:tdb_brlock.part.3
121,000,272 lib/tdb/common/mutex.c:tdb_mutex_unlock
118,000,225 /nptl/pthread_mutex_lock.c:__pthread_mutex_lock_full
112,750,222 lib/tdb/common/freelist.c:tdb_allocate_from_freelist
112,000,168 lib/tdb/common/lock.c:tdb_lock
94,500,154 lib/tdb/common/io.c:tdb_ofs_read
94,000,188 /nptl/pthread_mutex_unlock.c:__pthread_mutex_unlock_full
86,000,086 lib/dbwrap/dbwrap.c:dbwrap_lock_order_lock
83,000,083 lib/dbwrap/dbwrap_tdb.c:db_tdb_do_locked
time smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 5000000
gives:
902834 locks/sec
real 0m11,103s
user 0m8,233s
sys 0m2,868s
vs.
1037262 locks/sec
real 0m9,685s
user 0m6,788s
sys 0m2,896s
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Volker Lendecke <vl@samba.org>
Autobuild-User(master): Volker Lendecke <vl@samba.org>
Autobuild-Date(master): Wed Jul 8 11:02:39 UTC 2020 on sn-devel-184
-rw-r--r-- | source3/lib/dbwrap/dbwrap_watch.c | 60 |
1 file changed, 56 insertions, 4 deletions
diff --git a/source3/lib/dbwrap/dbwrap_watch.c b/source3/lib/dbwrap/dbwrap_watch.c index 206eabc8d5d..c442bf2e8f6 100644 --- a/source3/lib/dbwrap/dbwrap_watch.c +++ b/source3/lib/dbwrap/dbwrap_watch.c @@ -287,6 +287,14 @@ static int db_watched_subrec_destructor(struct db_watched_subrec *s) return 0; } +struct dbwrap_watched_subrec_wakeup_state { + struct messaging_context *msg_ctx; +}; +static void dbwrap_watched_subrec_wakeup_fn( + struct db_record *rec, + TDB_DATA value, + void *private_data); + struct dbwrap_watched_do_locked_state { struct db_context *db; void (*fn)(struct db_record *rec, @@ -296,6 +304,20 @@ struct dbwrap_watched_do_locked_state { struct db_watched_subrec subrec; + /* + * This contains the initial value we got + * passed to dbwrap_watched_do_locked_fn() + * + * It's only used in order to pass it + * to dbwrap_watched_subrec_wakeup_fn() + * in dbwrap_watched_do_locked_{storev,delete}() + * + * It gets cleared after the first call to + * dbwrap_watched_subrec_wakeup_fn() as we + * only need to wakeup once per dbwrap_do_locked(). + */ + TDB_DATA wakeup_value; + NTSTATUS status; }; @@ -305,8 +327,20 @@ static NTSTATUS dbwrap_watched_do_locked_storev( { struct dbwrap_watched_do_locked_state *state = rec->private_data; struct db_watched_subrec *subrec = &state->subrec; + struct db_watched_ctx *ctx = talloc_get_type_abort( + state->db->private_data, struct db_watched_ctx); + struct dbwrap_watched_subrec_wakeup_state wakeup_state = { + .msg_ctx = ctx->msg, + }; NTSTATUS status; + /* + * Wakeup only needs to happen once. 
+ * so we clear state->wakeup_value after the first run + */ + dbwrap_watched_subrec_wakeup_fn(rec, state->wakeup_value, &wakeup_state); + state->wakeup_value = (TDB_DATA) { .dsize = 0, }; + status = dbwrap_watched_subrec_storev(rec, subrec, dbufs, num_dbufs, flags); return status; @@ -316,8 +350,20 @@ static NTSTATUS dbwrap_watched_do_locked_delete(struct db_record *rec) { struct dbwrap_watched_do_locked_state *state = rec->private_data; struct db_watched_subrec *subrec = &state->subrec; + struct db_watched_ctx *ctx = talloc_get_type_abort( + state->db->private_data, struct db_watched_ctx); + struct dbwrap_watched_subrec_wakeup_state wakeup_state = { + .msg_ctx = ctx->msg, + }; NTSTATUS status; + /* + * Wakeup only needs to happen once. + * so we clear state->wakeup_value after the first run + */ + dbwrap_watched_subrec_wakeup_fn(rec, state->wakeup_value, &wakeup_state); + state->wakeup_value = (TDB_DATA) { .dsize = 0, }; + status = dbwrap_watched_subrec_delete(rec, subrec); return status; } @@ -343,6 +389,7 @@ static void dbwrap_watched_do_locked_fn( state->subrec = (struct db_watched_subrec) { .subrec = subrec }; + state->wakeup_value = subrec_value; ok = dbwrap_watch_rec_parse(subrec_value, NULL, NULL, &value); if (!ok) { @@ -382,10 +429,6 @@ static NTSTATUS dbwrap_watched_do_locked(struct db_context *db, TDB_DATA key, return state.status; } -struct dbwrap_watched_subrec_wakeup_state { - struct messaging_context *msg_ctx; -}; - static void dbwrap_watched_subrec_wakeup_fn( struct db_record *rec, TDB_DATA value, @@ -451,6 +494,15 @@ static void dbwrap_watched_subrec_wakeup( }; NTSTATUS status; + if (rec->storev == dbwrap_watched_do_locked_storev) { + /* + * This is handled in the caller, + * as we need to avoid recursion + * into dbwrap_do_locked(). + */ + return; + } + status = dbwrap_do_locked( backend, subrec->subrec->key, |