author    Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>  2019-01-22 17:30:48 +0100
committer Lukáš Nykrýn <lnykryn@redhat.com>                2019-02-07 12:57:43 +0100
commit    8da81d2aba2768ced497790cc05b9f73c6268833 (patch)
tree      236306c296099586b871ff7c254d07e6c2a41ab6
parent    de72fa6b0582b95216215cc1400412fe91bc8ba3 (diff)
download  systemd-8da81d2aba2768ced497790cc05b9f73c6268833.tar.gz
journald: periodically drop cache for all dead PIDs
In normal use, this allows us to drop dead entries from the cache and reduces
the cache size so that we don't evict entries unnecessarily.

The time limit is there mostly to serve as a guard against malicious logging
from many different PIDs.

(cherry picked from commit 91714a7f427a6c9c5c3be8b3819fee45050028f3)

Related: #1664976
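The pattern the patch adds is easy to see in isolation: remember when the last full sweep of the cache happened, and only sweep again once that timestamp has aged past a threshold, measured on the monotonic clock. The following is a minimal, stand-alone sketch of that idea, not journald code; maybe_flush(), flush_dead_entries() and FLUSH_INTERVAL_USEC are hypothetical names, and the 16-second interval is only an assumption for illustration.

/* Rate-limit an expensive cleanup pass: run it at most once per interval,
 * measured on the monotonic clock so wall-clock jumps don't matter.
 * Hypothetical sketch; names do not come from the journald sources. */
#include <stdint.h>
#include <time.h>

typedef uint64_t usec_t;
#define USEC_PER_SEC         ((usec_t) 1000000ULL)
#define FLUSH_INTERVAL_USEC  (16 * USEC_PER_SEC)   /* assumed interval */

static usec_t now_monotonic_usec(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (usec_t) ts.tv_sec * USEC_PER_SEC + (usec_t) ts.tv_nsec / 1000;
}

static void maybe_flush(usec_t *last_flush, void (*flush_dead_entries)(void)) {
        usec_t t = now_monotonic_usec();

        /* Skip the scan entirely if the previous one is still recent enough. */
        if (*last_flush + FLUSH_INTERVAL_USEC >= t)
                return;

        flush_dead_entries();
        *last_flush = t;
}

This mirrors the guard added to client_context_try_shrink_to() below: even if many distinct PIDs log in a burst, the per-PID cache is swept at most once per interval, so the sweep itself cannot be turned into a denial-of-service vector.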
-rw-r--r--   src/journal/journald-context.c   28
-rw-r--r--   src/journal/journald-server.h     2
2 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/src/journal/journald-context.c b/src/journal/journald-context.c
index 0f0dc1de4d..51f79fd803 100644
--- a/src/journal/journald-context.c
+++ b/src/journal/journald-context.c
@@ -541,15 +541,39 @@ refresh:
}

static void client_context_try_shrink_to(Server *s, size_t limit) {
+        ClientContext *c;
+        usec_t t;
+
        assert(s);

+        /* Flush any cache entries for PIDs that have already moved on. Don't do this
+         * too often, since it's a slow process. */
+        t = now(CLOCK_MONOTONIC);
+        if (s->last_cache_pid_flush + MAX_USEC < t) {
+                unsigned n = prioq_size(s->client_contexts_lru), idx = 0;
+
+                /* We do a number of iterations based on the initial size of the prioq. When we remove an
+                 * item, a new item is moved into its place, and items to the right might be reshuffled.
+                 */
+                for (unsigned i = 0; i < n; i++) {
+                        c = prioq_peek_by_index(s->client_contexts_lru, idx);
+
+                        assert(c->n_ref == 0);
+
+                        if (!pid_is_unwaited(c->pid))
+                                client_context_free(s, c);
+                        else
+                                idx++;
+                }
+
+                s->last_cache_pid_flush = t;
+        }
+
        /* Bring the number of cache entries below the indicated limit, so that we can create a new entry without
         * breaching the limit. Note that we only flush out entries that aren't pinned here. This means the number of
         * cache entries may very well grow beyond the limit, if all entries stored remain pinned. */

        while (hashmap_size(s->client_contexts) > limit) {
-                ClientContext *c;
-
                c = prioq_pop(s->client_contexts_lru);
                if (!c)
                        break; /* All remaining entries are pinned, give up */
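The sweep added above hinges on pid_is_unwaited() deciding whether a cached PID still refers to a process that exists, including a zombie that has not been reaped yet. As a rough illustration only, and not the actual systemd implementation, such a liveness test can be approximated with kill() and signal 0:

/* Approximate PID liveness check; pid_still_exists() is a hypothetical name.
 * kill(pid, 0) delivers no signal but reports whether the PID is valid right
 * now. EPERM means the process exists but is owned by someone else; only
 * ESRCH means there is no such process. PID reuse can still yield false
 * positives for a long-dead client. */
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <sys/types.h>

static bool pid_still_exists(pid_t pid) {
        if (pid <= 0)
                return false;

        if (kill(pid, 0) >= 0)
                return true;

        return errno != ESRCH;
}

Entries whose PID fails this kind of check are freed on the spot; everything else stays in the LRU prioq and is only evicted by the size-limit loop above once the cache grows past its limit.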
diff --git a/src/journal/journald-server.h b/src/journal/journald-server.h
index 983be8bb89..c6c9b1fb1d 100644
--- a/src/journal/journald-server.h
+++ b/src/journal/journald-server.h
@@ -163,6 +163,8 @@ struct Server {
        Hashmap *client_contexts;
        Prioq *client_contexts_lru;

+        usec_t last_cache_pid_flush;
+
        ClientContext *my_context;   /* the context of journald itself */
        ClientContext *pid1_context; /* the context of PID 1 */
};