diff options
author | Stefan Metzmacher <metze@samba.org> | 2022-06-30 15:53:47 +0200 |
---|---|---|
committer | Ralph Boehme <slow@samba.org> | 2022-07-26 13:40:34 +0000 |
commit | f62beaa2c25043a1b3dde408ce7d8f7327ac3e13 (patch) | |
tree | 500775cd4bc2f6fea8210108b88c15e36d4a08a2 | |
parent | 50163da3096d71c08e9c3ff13dd3ecb252a60f4e (diff) | |
download | samba-f62beaa2c25043a1b3dde408ce7d8f7327ac3e13.tar.gz |
s3:dbwrap_watch: allow callers of dbwrap_watched_watch_send/recv() to manage the watcher instances
The destructor triggered by dbwrap_watched_watch_recv() will
remove the watcher instance via a dedicated dbwrap_do_locked(),
just calling dbwrap_watched_watch_remove_instance() inside.
But the typical caller triggers a dbwrap_do_locked() again after
dbwrap_watched_watch_recv() returned. Which means we call
dbwrap_do_locked() twice.
We now allow dbwrap_watched_watch_recv() to return the existing
instance id (if it still exists) and remove the destructor.
That way the caller can pass the given instance id to
dbwrap_watched_watch_remove_instance() from within its own dbwrap_do_locked(),
when it decides to leave the queue, because it's happy with the new
state of the record. In order to get the best performance
dbwrap_watched_watch_remove_instance() should be called before any
dbwrap_record_storev() or dbwrap_record_delete(),
because that will only trigger a single low level storev/delete.
If the caller found out that the state of the record doesn't meet the
expectations and the caller wants to continue watching the
record (from its current position, most likely the first one),
dbwrap_watched_watch_remove_instance() can be skipped and the
instance id can be passed to dbwrap_watched_watch_send() again,
in order to resume waiting on the existing instance.
Currently the watcher instances were always removed (most likely from
the first position) and re-added (to the last position), which may
cause unfair latencies.
In order to improve the overhead of adding a new watcher instance
the caller can call dbwrap_watched_watch_add_instance() before
any dbwrap_record_storev() or dbwrap_record_delete(), which
will only result in a single low level storev/delete.
The returned instance id is then passed to dbwrap_watched_watch_send(),
within the same dbwrap_do_locked() run.
It also adds a way to avoid alerting any callers during
the current dbwrap_do_locked() run.
Layers above may only want to wake up watchers
in specific situations, as it would be useless to wake
them in other situations.
This will soon be used to add more fairness to the g_lock code.
Note that this commit only prepares the api for the above to be useful,
the instance returned by dbwrap_watched_watch_recv() is most likely 0,
which means the watcher entry was already removed, but that will change
in the following commits.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15125
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>
Reviewed-by: Ralph Boehme <slow@samba.org>
-rw-r--r-- | source3/lib/dbwrap/dbwrap_watch.c | 32 | ||||
-rw-r--r-- | source3/lib/dbwrap/dbwrap_watch.h | 5 | ||||
-rw-r--r-- | source3/lib/g_lock.c | 10 | ||||
-rw-r--r-- | source3/smbd/smbXsrv_client.c | 3 | ||||
-rw-r--r-- | source3/smbd/smbXsrv_session.c | 3 | ||||
-rw-r--r-- | source3/torture/test_dbwrap_watch.c | 13 |
6 files changed, 48 insertions, 18 deletions
diff --git a/source3/lib/dbwrap/dbwrap_watch.c b/source3/lib/dbwrap/dbwrap_watch.c index 8f4b792fa63..39d8b2ac11d 100644 --- a/source3/lib/dbwrap/dbwrap_watch.c +++ b/source3/lib/dbwrap/dbwrap_watch.c @@ -206,7 +206,6 @@ static NTSTATUS dbwrap_watched_storev(struct db_record *rec, const TDB_DATA *dbufs, int num_dbufs, int flags); static NTSTATUS dbwrap_watched_delete(struct db_record *rec); -static void dbwrap_watched_watch_skip_alerting(struct db_record *rec); static int db_watched_record_destructor(struct db_watched_record *wrec); static void db_watched_record_init(struct db_context *db, @@ -888,7 +887,7 @@ struct db_context *db_open_watched(TALLOC_CTX *mem_ctx, return db; } -static uint64_t dbwrap_watched_watch_add_instance(struct db_record *rec) +uint64_t dbwrap_watched_watch_add_instance(struct db_record *rec) { struct db_watched_record *wrec = db_record_get_watched_record(rec); static uint64_t global_instance = 1; @@ -905,7 +904,7 @@ static uint64_t dbwrap_watched_watch_add_instance(struct db_record *rec) return wrec->added.instance; } -static void dbwrap_watched_watch_remove_instance(struct db_record *rec, uint64_t instance) +void dbwrap_watched_watch_remove_instance(struct db_record *rec, uint64_t instance) { struct db_watched_record *wrec = db_record_get_watched_record(rec); struct dbwrap_watcher clear_watcher = { @@ -985,7 +984,7 @@ static void dbwrap_watched_watch_remove_instance(struct db_record *rec, uint64_t return; } -static void dbwrap_watched_watch_skip_alerting(struct db_record *rec) +void dbwrap_watched_watch_skip_alerting(struct db_record *rec) { struct db_watched_record *wrec = db_record_get_watched_record(rec); @@ -1010,6 +1009,7 @@ static int dbwrap_watched_watch_state_destructor( struct tevent_req *dbwrap_watched_watch_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct db_record *rec, + uint64_t resumed_instance, struct server_id blocker) { struct db_context *db = dbwrap_record_get_db(rec); @@ -1033,11 +1033,23 @@ struct tevent_req 
*dbwrap_watched_watch_send(TALLOC_CTX *mem_ctx, return tevent_req_post(req, ev); } - if (wrec->added.instance == 0) { + if (resumed_instance == 0 && wrec->added.instance == 0) { /* * Adding a new instance */ instance = dbwrap_watched_watch_add_instance(rec); + } else if (resumed_instance != 0 && wrec->added.instance == 0) { + /* + * Resuming an existing instance that was + * already present before do_locked started + */ + instance = resumed_instance; + } else if (resumed_instance == wrec->added.instance) { + /* + * The caller used dbwrap_watched_watch_add_instance() + * already during this do_locked() invocation. + */ + instance = resumed_instance; } else { tevent_req_nterror(req, NT_STATUS_REQUEST_NOT_ACCEPTED); return tevent_req_post(req, ev); @@ -1180,10 +1192,12 @@ static void dbwrap_watched_watch_done(struct tevent_req *subreq) * dbwrap_watched_record_wakeup(). */ talloc_set_destructor(state, NULL); + state->watcher.instance = 0; tevent_req_done(req); } NTSTATUS dbwrap_watched_watch_recv(struct tevent_req *req, + uint64_t *pkeep_instance, bool *blockerdead, struct server_id *blocker) { @@ -1195,6 +1209,14 @@ NTSTATUS dbwrap_watched_watch_recv(struct tevent_req *req, tevent_req_received(req); return status; } + if (pkeep_instance != NULL) { + *pkeep_instance = state->watcher.instance; + /* + * No need to remove ourselves anymore, + * the caller will take care of removing itself. 
+ */ + talloc_set_destructor(state, NULL); + } if (blockerdead != NULL) { *blockerdead = state->blockerdead; } diff --git a/source3/lib/dbwrap/dbwrap_watch.h b/source3/lib/dbwrap/dbwrap_watch.h index a836ca48e8a..0731ad91641 100644 --- a/source3/lib/dbwrap/dbwrap_watch.h +++ b/source3/lib/dbwrap/dbwrap_watch.h @@ -27,11 +27,16 @@ struct db_context *db_open_watched(TALLOC_CTX *mem_ctx, struct db_context **backend, struct messaging_context *msg); +uint64_t dbwrap_watched_watch_add_instance(struct db_record *rec); +void dbwrap_watched_watch_remove_instance(struct db_record *rec, uint64_t instance); +void dbwrap_watched_watch_skip_alerting(struct db_record *rec); struct tevent_req *dbwrap_watched_watch_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, struct db_record *rec, + uint64_t resume_instance, struct server_id blocker); NTSTATUS dbwrap_watched_watch_recv(struct tevent_req *req, + uint64_t *pkeep_instance, bool *blockerdead, struct server_id *blocker); diff --git a/source3/lib/g_lock.c b/source3/lib/g_lock.c index 2194f7164e1..f7fc5544d26 100644 --- a/source3/lib/g_lock.c +++ b/source3/lib/g_lock.c @@ -571,7 +571,7 @@ static void g_lock_lock_fn( } state->watch_req = dbwrap_watched_watch_send( - state->req_state, state->req_state->ev, rec, blocker); + state->req_state, state->req_state->ev, rec, 0, blocker); if (state->watch_req == NULL) { state->status = NT_STATUS_NO_MEMORY; } @@ -657,7 +657,7 @@ static void g_lock_lock_retry(struct tevent_req *subreq) bool blockerdead = false; NTSTATUS status; - status = dbwrap_watched_watch_recv(subreq, &blockerdead, &blocker); + status = dbwrap_watched_watch_recv(subreq, NULL, &blockerdead, &blocker); DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status)); TALLOC_FREE(subreq); @@ -1259,7 +1259,7 @@ static void g_lock_watch_data_send_fn( DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch); subreq = dbwrap_watched_watch_send( - state, state->ev, rec, state->blocker); + state, state->ev, rec, 0, 
state->blocker); if (subreq == NULL) { state->status = NT_STATUS_NO_MEMORY; return; @@ -1338,7 +1338,7 @@ static void g_lock_watch_data_done_fn( } subreq = dbwrap_watched_watch_send( - state, state->ev, rec, state->blocker); + state, state->ev, rec, 0, state->blocker); if (subreq == NULL) { state->status = NT_STATUS_NO_MEMORY; return; @@ -1357,7 +1357,7 @@ static void g_lock_watch_data_done(struct tevent_req *subreq) NTSTATUS status; status = dbwrap_watched_watch_recv( - subreq, &state->blockerdead, &state->blocker); + subreq, NULL, &state->blockerdead, &state->blocker); TALLOC_FREE(subreq); if (tevent_req_nterror(req, status)) { DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n", diff --git a/source3/smbd/smbXsrv_client.c b/source3/smbd/smbXsrv_client.c index 277ae1bab25..93eef753b62 100644 --- a/source3/smbd/smbXsrv_client.c +++ b/source3/smbd/smbXsrv_client.c @@ -545,6 +545,7 @@ static void smb2srv_client_mc_negprot_next(struct tevent_req *req) subreq = dbwrap_watched_watch_send(state, state->ev, state->db_rec, + 0, /* resume_instance */ global->server_id); if (tevent_req_nomem(subreq, req)) { return; @@ -675,7 +676,7 @@ static void smb2srv_client_mc_negprot_watched(struct tevent_req *subreq) struct tevent_req); NTSTATUS status; - status = dbwrap_watched_watch_recv(subreq, NULL, NULL); + status = dbwrap_watched_watch_recv(subreq, NULL, NULL, NULL); TALLOC_FREE(subreq); if (tevent_req_nterror(req, status)) { return; diff --git a/source3/smbd/smbXsrv_session.c b/source3/smbd/smbXsrv_session.c index 01512f3113f..e5a3065ff00 100644 --- a/source3/smbd/smbXsrv_session.c +++ b/source3/smbd/smbXsrv_session.c @@ -1096,6 +1096,7 @@ static void smb2srv_session_close_previous_check(struct tevent_req *req) } subreq = dbwrap_watched_watch_send(state, state->ev, state->db_rec, + 0, /* resume_instance */ (struct server_id){0}); if (tevent_req_nomem(subreq, req)) { TALLOC_FREE(state->db_rec); @@ -1151,7 +1152,7 @@ static void smb2srv_session_close_previous_modified(struct 
tevent_req *subreq) uint32_t global_id; NTSTATUS status; - status = dbwrap_watched_watch_recv(subreq, NULL, NULL); + status = dbwrap_watched_watch_recv(subreq, NULL, NULL, NULL); TALLOC_FREE(subreq); if (tevent_req_nterror(req, status)) { return; diff --git a/source3/torture/test_dbwrap_watch.c b/source3/torture/test_dbwrap_watch.c index d66bdaa9914..480b2c548d5 100644 --- a/source3/torture/test_dbwrap_watch.c +++ b/source3/torture/test_dbwrap_watch.c @@ -120,6 +120,7 @@ bool run_dbwrap_watch1(int dummy) goto fail; } req = dbwrap_watched_watch_send(talloc_tos(), ev, rec, + 0, /* resume_instance */ (struct server_id){0}); if (req == NULL) { fprintf(stderr, "dbwrap_record_watch_send failed\n"); @@ -146,7 +147,7 @@ bool run_dbwrap_watch1(int dummy) goto fail; } - status = dbwrap_watched_watch_recv(req, NULL, NULL); + status = dbwrap_watched_watch_recv(req, NULL, NULL, NULL); if (!NT_STATUS_IS_OK(status)) { fprintf(stderr, "dbwrap_record_watch_recv failed: %s\n", nt_errstr(status)); @@ -256,7 +257,7 @@ bool run_dbwrap_watch3(int dummy) } req = dbwrap_watched_watch_send( - db, ev, rec, (struct server_id) { 0 }); + db, ev, rec, 0, (struct server_id) { 0 }); if (req == NULL) { fprintf(stderr, "dbwrap_watched_watch_send failed\n"); exit(2); @@ -329,7 +330,7 @@ static void dbwrap_watch4_fn(struct db_record *rec, bool ok; state->req1 = dbwrap_watched_watch_send( - state->mem_ctx, state->ev, rec, (struct server_id) { .pid=0 }); + state->mem_ctx, state->ev, rec, 0, (struct server_id) { .pid=0 }); if (state->req1 == NULL) { goto nomem; } @@ -343,7 +344,7 @@ static void dbwrap_watch4_fn(struct db_record *rec, } state->req2 = dbwrap_watched_watch_send( - state->mem_ctx, state->ev, rec, (struct server_id) { .pid=0 }); + state->mem_ctx, state->ev, rec, 0, (struct server_id) { .pid=0 }); if (state->req2 == NULL) { goto nomem; } @@ -366,7 +367,7 @@ static void dbwrap_watch4_fn(struct db_record *rec, static void dbwrap_watch4_done1(struct tevent_req *subreq) { struct 
dbwrap_watch4_state *state = tevent_req_callback_data_void(subreq); - state->status1 = dbwrap_watched_watch_recv(subreq, NULL, NULL); + state->status1 = dbwrap_watched_watch_recv(subreq, NULL, NULL, NULL); TALLOC_FREE(subreq); printf("req1 finished: %s\n", nt_errstr(state->status1)); state->req1 = NULL; @@ -375,7 +376,7 @@ static void dbwrap_watch4_done1(struct tevent_req *subreq) static void dbwrap_watch4_done2(struct tevent_req *subreq) { struct dbwrap_watch4_state *state = tevent_req_callback_data_void(subreq); - state->status2 = dbwrap_watched_watch_recv(subreq, NULL, NULL); + state->status2 = dbwrap_watched_watch_recv(subreq, NULL, NULL, NULL); TALLOC_FREE(subreq); printf("req2 finished: %s\n", nt_errstr(state->status2)); state->req2 = NULL; |