author     Willy Tarreau <w@1wt.eu>  2021-08-28 11:07:31 +0200
committer  Willy Tarreau <w@1wt.eu>  2021-08-28 11:18:10 +0200
commit     fe456c581f521dedb68a18c49f17f981f75a943b (patch)
tree       997946e1157386a0b4b574b5dd37d2f0363b67c7
parent     e365aa28d4f30b3d98118ab460c856d3aa72c75e (diff)
download   haproxy-fe456c581f521dedb68a18c49f17f981f75a943b.tar.gz
MINOR: time: add report_idle() to report process-wide idle time
Before threads were introduced in 1.8, idle_pct used to be a global variable
indicating the overall process idle time. Threads made it thread-local, meaning
that its reporting in the stats made little sense, though this was not easy to
spot. In 2.0, the idle_pct variable moved to struct thread_info via commit
81036f273 ("MINOR: time: move the cpu, mono, and idle time to thread_info").
This made it more obvious that idle_pct was per thread, and also made it
possible to measure it more accurately. But no further effort was made in that
direction.

This patch introduces a new report_idle() function that accurately averages the
per-thread idle time over all running threads (i.e. it should remain valid even
if some threads are paused or stopped), and makes use of it in the stats /
"show info" reports.

Sending traffic over only two connections of an 8-thread process would
previously show this erratic CPU usage pattern:

$ while :; do socat /tmp/sock1 - <<< "show info" | grep ^Idle; sleep 0.1; done
Idle_pct: 30
Idle_pct: 35
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 35
Idle_pct: 33
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100

Now it shows this more accurate measurement:

$ while :; do socat /tmp/sock1 - <<< "show info" | grep ^Idle; sleep 0.1; done
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83

This is not technically a bug but this lack of precision definitely affects
some users who rely on the idle_pct measurement. This should at least be
backported to 2.4, and might be backported to some older releases depending
on user demand.
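For illustration (not part of this patch), the averaging can be reproduced in a
minimal standalone sketch. It uses C11 atomics where the patch uses the
HA_ATOMIC_* macros, and the demo_* names below are hypothetical stand-ins for
ha_thread_info, all_threads_mask and MAX_THREADS. With two busy threads around
33% idle and six fully idle ones, the average is (2*33 + 6*100) / 8 = 83, which
matches the stable Idle_pct: 83 shown above:

/* demo.c: standalone sketch of the report_idle() averaging.
 * Build: cc -std=c11 -o demo demo.c
 * The demo_* names are hypothetical; haproxy uses ha_thread_info,
 * all_threads_mask, MAX_THREADS and the HA_ATOMIC_* macros instead.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_MAX_THREADS 8

static struct {
	atomic_uint idle_pct;   /* per-thread idle percentage, 0..100 */
} demo_thread_info[DEMO_MAX_THREADS];

static unsigned long demo_threads_mask = 0xFFUL;  /* all 8 threads running */

/* average idle_pct over running threads only, like report_idle() */
static unsigned int demo_report_idle(void)
{
	unsigned int total = 0, rthr = 0, thr;

	for (thr = 0; thr < DEMO_MAX_THREADS; thr++) {
		if (!(demo_threads_mask & (1UL << thr)))
			continue;
		total += atomic_load(&demo_thread_info[thr].idle_pct);
		rthr++;
	}
	return rthr ? total / rthr : 0;
}

int main(void)
{
	unsigned int thr;

	/* two threads carry all the traffic, six are fully idle */
	for (thr = 0; thr < DEMO_MAX_THREADS; thr++)
		atomic_store(&demo_thread_info[thr].idle_pct, thr < 2 ? 33 : 100);

	printf("Idle_pct: %u\n", demo_report_idle());  /* (2*33 + 6*100)/8 = 83 */
	return 0;
}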
-rw-r--r--  include/haproxy/time.h  18
-rw-r--r--  src/stats.c              4
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/include/haproxy/time.h b/include/haproxy/time.h
index 4a3feb5df..c8358e00c 100644
--- a/include/haproxy/time.h
+++ b/include/haproxy/time.h
@@ -577,10 +577,26 @@ static inline void measure_idle()
 	if (samp_time < 500000)
 		return;
 
-	ti->idle_pct = (100ULL * idle_time + samp_time / 2) / samp_time;
+	HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
 	idle_time = samp_time = 0;
 }
 
+/* report the average CPU idle percentage over all running threads, between 0 and 100 */
+static inline uint report_idle()
+{
+	uint total = 0;
+	uint rthr = 0;
+	uint thr;
+
+	for (thr = 0; thr < MAX_THREADS; thr++) {
+		if (!(all_threads_mask & (1UL << thr)))
+			continue;
+		total += HA_ATOMIC_LOAD(&ha_thread_info[thr].idle_pct);
+		rthr++;
+	}
+	return rthr ? total / rthr : 0;
+}
+
 /* Collect date and time information before calling poll(). This will be used
  * to count the run time of the past loop and the sleep time of the next poll.
  */
diff --git a/src/stats.c b/src/stats.c
index 35415b309..11198f1b9 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -3400,7 +3400,7 @@ static void stats_dump_html_info(struct stream_interface *si, struct uri_auth *uri)
 	              actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
 	              bps >= 1000000000UL ? (bps / 1000000000.0) : bps >= 1000000UL ? (bps / 1000000.0) : (bps / 1000.0),
 	              bps >= 1000000000UL ? 'G' : bps >= 1000000UL ? 'M' : 'k',
-	              total_run_queues(), total_allocated_tasks(), ti->idle_pct
+	              total_run_queues(), total_allocated_tasks(), report_idle()
 	              );
 
 	/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
@@ -4433,7 +4433,7 @@ int stats_fill_info(struct field *info, int len, uint flags)
 #endif
 	info[INF_TASKS] = mkf_u32(0, total_allocated_tasks());
 	info[INF_RUN_QUEUE] = mkf_u32(0, total_run_queues());
-	info[INF_IDLE_PCT] = mkf_u32(FN_AVG, ti->idle_pct);
+	info[INF_IDLE_PCT] = mkf_u32(FN_AVG, report_idle());
 	info[INF_NODE] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
 	if (global.desc)
 		info[INF_DESCRIPTION] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.desc);