Diffstat (limited to 'source3/smbd/blocking.c')
-rw-r--r--  source3/smbd/blocking.c | 84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/source3/smbd/blocking.c b/source3/smbd/blocking.c
index 514985bcd75..fbd1ea812f3 100644
--- a/source3/smbd/blocking.c
+++ b/source3/smbd/blocking.c
@@ -103,6 +103,7 @@ struct smbd_smb1_do_locks_state {
struct files_struct *fsp;
uint32_t timeout;
uint32_t polling_msecs;
+ uint32_t retry_msecs;
struct timeval endtime;
bool large_offset; /* required for correct cancel */
enum brl_flavour lock_flav;
@@ -187,6 +188,33 @@ set_endtime:
(state->timeout % 1000) * 1000);
}
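
For reference, the set_endtime code above splits the millisecond timeout into the seconds/microseconds pair a struct timeval expects. A minimal standalone sketch of that conversion (plain C, outside Samba; msecs_to_timeval is a name invented here):

#include <stdint.h>
#include <sys/time.h>

/* Hypothetical helper mirroring the timeout / 1000 and
 * (timeout % 1000) * 1000 split in the hunk above. */
struct timeval msecs_to_timeval(uint32_t msecs)
{
	struct timeval tv = {
		.tv_sec = msecs / 1000,           /* whole seconds */
		.tv_usec = (msecs % 1000) * 1000, /* remainder as microseconds */
	};
	return tv;
}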
+static void smbd_smb1_do_locks_update_retry_msecs(
+ struct smbd_smb1_do_locks_state *state)
+{
+ /*
+ * The default lp_lock_spin_time() is 200ms;
+ * we use half of it to trigger the first retry.
+ *
+ * v_min is in the range of 0.001 to 10 secs
+ * (0.1 secs by default)
+ *
+ * v_max is in the range of 0.01 to 100 secs
+ * (1.0 secs by default)
+ *
+ * The typical steps are:
+ * 0.1, 0.2, 0.3, 0.4, ... 1.0
+ */
+ uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
+ uint32_t v_max = 10 * v_min;
+
+ if (state->retry_msecs >= v_max) {
+ state->retry_msecs = v_max;
+ return;
+ }
+
+ state->retry_msecs += v_min;
+}
+
static void smbd_smb1_do_locks_update_polling_msecs(
struct smbd_smb1_do_locks_state *state)
{
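
With the default lp_lock_spin_time() of 200ms, v_min comes out as 100 and v_max as 1000, which produces the 0.1s .. 1.0s steps the comment lists. A standalone sketch that replays the update logic (not Samba code; lock_spin_time_msecs stands in for lp_lock_spin_time()):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static uint32_t lock_spin_time_msecs = 200; /* documented default */

static void update_retry_msecs(uint32_t *retry_msecs)
{
	uint32_t v_min = MAX(2, MIN(20000, lock_spin_time_msecs)) / 2;
	uint32_t v_max = 10 * v_min;

	if (*retry_msecs >= v_max) {
		*retry_msecs = v_max; /* cap at v_max */
		return;
	}
	*retry_msecs += v_min; /* linear backoff in v_min steps */
}

int main(void)
{
	uint32_t msecs = 0;
	int i;

	/* prints 100, 200, ..., 1000, then stays capped at 1000 */
	for (i = 0; i < 12; i++) {
		update_retry_msecs(&msecs);
		printf("%u\n", msecs);
	}
	return 0;
}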
@@ -459,9 +487,60 @@ static void smbd_smb1_do_locks_try(struct tevent_req *req)
if (NT_STATUS_IS_OK(status)) {
goto done;
}
+ if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
+ /*
+ * We got NT_STATUS_RETRY,
+ * we reset polling_msecs so that
+ * the retries based on LOCK_NOT_GRANTED
+ * will later start with small intervals again.
+ */
+ state->polling_msecs = 0;
+
+ /*
+ * The backend wasn't able to decide yet.
+ * We need to wait even for non-blocking
+ * locks.
+ *
+ * The backend uses blocking_smblctx == UINT64_MAX
+ * to indicate that we should use retry timers.
+ *
+ * It uses blocking_smblctx == 0 to indicate
+ * it will use share_mode_wakeup_waiters()
+ * to wake us. Note that unrelated changes in
+ * locking.tdb may cause retries.
+ */
+
+ if (blocking_smblctx != UINT64_MAX) {
+ SMB_ASSERT(blocking_smblctx == 0);
+ goto setup_retry;
+ }
+
+ smbd_smb1_do_locks_update_retry_msecs(state);
+
+ DBG_DEBUG("Waiting for a backend decision. "
+ "Retry in %"PRIu32" msecs\n",
+ state->retry_msecs);
+
+ /*
+ * We completely ignore state->endtime here
+ * as we'll wait for a backend decision forever.
+ * If the backend is smart enough to implement
+ * some NT_STATUS_RETRY logic, it has to
+ * switch to some other status eventually in order
+ * to avoid waiting forever.
+ */
+ endtime = timeval_current_ofs_msec(state->retry_msecs);
+ goto setup_retry;
+ }
if (!ERROR_WAS_LOCK_DENIED(status)) {
goto done;
}
+ /*
+ * We got LOCK_NOT_GRANTED, make sure
+ * a following NT_STATUS_RETRY will start
+ * with short intervals again.
+ */
+ state->retry_msecs = 0;
smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
@@ -503,6 +582,7 @@ static void smbd_smb1_do_locks_try(struct tevent_req *req)
endtime = timeval_min(&endtime, &tmp);
}
+setup_retry:
subreq = dbwrap_watched_watch_send(
state, state->ev, lck->data->record, blocking_pid);
if (tevent_req_nomem(subreq, req)) {
@@ -511,6 +591,10 @@ static void smbd_smb1_do_locks_try(struct tevent_req *req)
TALLOC_FREE(lck);
tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
+ if (timeval_is_zero(&endtime)) {
+ return;
+ }
+
ok = tevent_req_set_endtime(subreq, state->ev, endtime);
if (!ok) {
status = NT_STATUS_NO_MEMORY;
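
The tail of the change relies on a convention: a zero-initialized endtime means no deadline was computed, so the request waits on the record watch alone (the NT_STATUS_RETRY path with blocking_smblctx == 0), while any other endtime is armed via tevent_req_set_endtime(). A minimal sketch of that convention (timeval_is_zero() re-implemented here so the snippet is self-contained; maybe_set_deadline() is hypothetical):

#include <stdbool.h>
#include <sys/time.h>

/* local stand-in for Samba's timeval_is_zero() from lib/util */
static bool endtime_is_unset(const struct timeval *tv)
{
	return (tv->tv_sec == 0) && (tv->tv_usec == 0);
}

/* Hypothetical condensation of the tail of smbd_smb1_do_locks_try():
 * arm a deadline only when one was actually computed. */
static bool maybe_set_deadline(struct timeval endtime,
			       bool (*set_endtime)(struct timeval))
{
	if (endtime_is_unset(&endtime)) {
		return true; /* wait forever for the watch/wakeup */
	}
	return set_endtime(endtime); /* false maps to NT_STATUS_NO_MEMORY */
}

Note the design choice: a zeroed timeval is treated as "unbounded", not as "already expired", so the wait-forever path cannot accidentally fire an immediate timeout.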