author     dormando <dormando@rydia.net>  2012-07-14 21:52:50 -0700
committer  dormando <dormando@rydia.net>  2012-07-27 12:47:16 -0700
commit     845a4fe1c47d8ddfc330b8184c2cb5ceec8c0a69 (patch)
tree       bb3e86b7094cffd72a62d34eafcafd68eff2619b
parent     d707b99d92dbc7c36d08be223411f9fe26ed8bfc (diff)
download   memcached-845a4fe1c47d8ddfc330b8184c2cb5ceec8c0a69.tar.gz
pre-split slab pages into slab freelists
Slab freelists used to be malloc'ed arrays; then they were changed into a linked freelist. Now we pre-split newly assigned/moved pages into a slab's freelist instead of lazily pulling pointers as needed. The loop is pretty darn direct and I can't measure a performance impact from this relatively rare event. In doing this, slab reassign can move memory without having to wait for a class to chew through its recently assigned page first.
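In miniature, the change walks a freshly allocated page in chunk-sized strides and pushes every chunk onto the class freelist up front. Below is a minimal standalone sketch of that idea; the types and freelist_push() are simplified stand-ins (memcached's real slabclass_t and do_slabs_free() carry more bookkeeping):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for memcached's slabclass_t and item header;
 * the real structs carry more bookkeeping. */
typedef struct free_chunk {
    struct free_chunk *next;      /* intrusive freelist link */
} free_chunk;

typedef struct {
    size_t size;                  /* bytes per chunk in this class */
    unsigned int perslab;         /* chunks per slab page */
    free_chunk *slots;            /* head of the class freelist */
    unsigned int sl_curr;         /* free chunks currently on the list */
} slabclass_t;

/* Stand-in for do_slabs_free(): push one chunk onto the freelist head. */
static void freelist_push(slabclass_t *p, void *ptr) {
    free_chunk *c = ptr;
    c->next = p->slots;
    p->slots = c;
    p->sl_curr++;
}

/* The patch's loop: stride through the page, freeing each chunk,
 * so the whole page is usable immediately instead of lazily. */
static void split_slab_page_into_freelist(char *ptr, slabclass_t *p) {
    unsigned int x;
    for (x = 0; x < p->perslab; x++) {
        freelist_push(p, ptr);
        ptr += p->size;
    }
}

int main(void) {
    slabclass_t cls = { .size = 1024, .perslab = 1024 };
    char *page = calloc(cls.perslab, cls.size);  /* one zeroed 1MB page */
    assert(page != NULL);
    split_slab_page_into_freelist(page, &cls);
    printf("free chunks after split: %u\n", cls.sl_curr);  /* prints 1024 */
    free(page);
    return 0;
}

The trade-off is a one-time O(perslab) loop at page-assignment time in exchange for a destination class whose entire new page is usable at once, which is what lets slab reassign run back-to-back.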
-rw-r--r--  slabs.c             | 21
-rw-r--r--  t/slabs_reassign.t  | 12
2 files changed, 24 insertions, 9 deletions
diff --git a/slabs.c b/slabs.c
index 2483fbf..4180f76 100644
--- a/slabs.c
+++ b/slabs.c
@@ -61,6 +61,7 @@ static pthread_mutex_t slabs_lock = PTHREAD_MUTEX_INITIALIZER;
  */
 static int do_slabs_newslab(const unsigned int id);
 static void *memory_allocate(size_t size);
+static void do_slabs_free(void *ptr, const size_t size, unsigned int id);
 
 #ifndef DONT_PREALLOC_SLABS
 /* Preallocate as many slab pages as possible (called from slabs_init)
@@ -189,6 +190,15 @@ static int grow_slab_list (const unsigned int id) {
     return 1;
 }
 
+static void split_slab_page_into_freelist(char *ptr, const unsigned int id) {
+    slabclass_t *p = &slabclass[id];
+    int x;
+    for (x = 0; x < p->perslab; x++) {
+        do_slabs_free(ptr, 0, id);
+        ptr += p->size;
+    }
+}
+
 static int do_slabs_newslab(const unsigned int id) {
     slabclass_t *p = &slabclass[id];
     int len = settings.slab_reassign ? settings.item_size_max
@@ -204,8 +214,9 @@ static int do_slabs_newslab(const unsigned int id) {
     }
 
     memset(ptr, 0, (size_t)len);
-    p->end_page_ptr = ptr;
-    p->end_page_free = p->perslab;
+    split_slab_page_into_freelist(ptr, id);
+//    p->end_page_ptr = ptr;
+//    p->end_page_free = p->perslab;
     p->slab_list[p->slabs++] = ptr;
 
     mem_malloced += len;
@@ -635,8 +646,10 @@ static void slab_rebalance_finish(void) {
     memset(slab_rebal.slab_start, 0, (size_t)settings.item_size_max);
 
     d_cls->slab_list[d_cls->slabs++] = slab_rebal.slab_start;
-    d_cls->end_page_ptr = slab_rebal.slab_start;
-    d_cls->end_page_free = d_cls->perslab;
+    split_slab_page_into_freelist(slab_rebal.slab_start,
+        slab_rebal.d_clsid);
+//    d_cls->end_page_ptr = slab_rebal.slab_start;
+//    d_cls->end_page_free = d_cls->perslab;
 
     slab_rebal.done = 0;
     slab_rebal.s_clsid = 0;
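One note for reading the hunks above: do_slabs_free() is forward-declared and called but its body isn't part of this diff. Roughly, it pushes the chunk onto the head of the class's linked freelist, as in the sketch below; asserts, trace probes, and stats locking are omitted, and the field names are an approximation of the era's item header rather than the exact source:

/* Approximate shape of do_slabs_free() at this point in the tree;
 * asserts, DTrace probes, and stats locking are omitted. */
static void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
    slabclass_t *p = &slabclass[id];
    item *it = (item *)ptr;

    it->it_flags = ITEM_SLABBED;  /* mark the chunk as sitting on a freelist */
    it->prev = 0;
    it->next = p->slots;          /* push onto the head of the freelist */
    if (it->next) it->next->prev = it;
    p->slots = it;

    p->sl_curr++;                 /* one more free chunk in this class */
    p->requested -= size;         /* the pre-split calls pass size == 0 */
}

Since the pre-split calls pass size == 0 and run over freshly memset memory, each iteration is only a few pointer writes, which is why the loop stays cheap even for small chunk sizes.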
diff --git a/t/slabs_reassign.t b/t/slabs_reassign.t
index cf4b6f8..ae5ddce 100644
--- a/t/slabs_reassign.t
+++ b/t/slabs_reassign.t
@@ -2,7 +2,7 @@
 
 use strict;
 use warnings;
-use Test::More tests => 131;
+use Test::More tests => 130;
 use FindBin qw($Bin);
 use lib "$Bin/lib";
 use MemcachedTest;
@@ -52,14 +52,16 @@ ok($slabs_before->{"31:total_pages"} != $slabs_after->{"31:total_pages"},
 ok($slabs_before->{"25:total_pages"} != $slabs_after->{"25:total_pages"},
     "slab 25 pagecount changed");
 
-# Try to move another slab, see that it complains
+# Try to move another slab, see that you can move two in a row
 print $sock "slabs reassign 31 25\r\n";
-like(scalar <$sock>, qr/^NOTFULL/, "Cannot re-run against class with empty space");
+like(scalar <$sock>, qr/^OK/, "Can re-run against class with empty space");
 
 # Try to move a page backwards. Should complain that source class isn't "safe"
 # to move from.
-print $sock "slabs reassign 25 31\r\n";
-like(scalar <$sock>, qr/^UNSAFE/, "Cannot move an unsafe slab back");
+# TODO: Wait until the above command completes, then try to move it back?
+# Seems pointless...
+#print $sock "slabs reassign 25 31\r\n";
+#like(scalar <$sock>, qr/^UNSAFE/, "Cannot move an unsafe slab back");
 
 # Try to insert items into both slabs
 print $sock "set bfoo51 0 0 70000\r\n", $bigdata, "\r\n";