From cea9c30fa549885e36471f1782359df2bdcf895a Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Wed, 19 Apr 2023 16:16:27 -0400 Subject: Move ar_hint to ar_table_struct This allows Hashes with ST tables to fit in the 80 byte size pool. --- gc.c | 6 +++--- hash.c | 31 +++++++++++++++++-------------- internal/hash.h | 14 ++++---------- test/ruby/test_gc_compact.rb | 3 ++- test/ruby/test_shapes.rb | 2 ++ 5 files changed, 28 insertions(+), 28 deletions(-) diff --git a/gc.c b/gc.c index 0f64efefa5..ab9d1bc857 100644 --- a/gc.c +++ b/gc.c @@ -8404,11 +8404,11 @@ gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, V break; case T_HASH: - obj_size = RHASH_SLOT_SIZE; + obj_size = sizeof(struct RHash) + (RHASH_ST_TABLE_P(src) ? sizeof(st_table) : sizeof(ar_table)); break; - default: - return src_pool; + default: + return src_pool; } if (rb_gc_size_allocatable_p(obj_size)){ diff --git a/hash.c b/hash.c index cef784cccc..63a51f54c4 100644 --- a/hash.c +++ b/hash.c @@ -393,13 +393,13 @@ ar_do_hash_hint(st_hash_t hash_value) static inline ar_hint_t ar_hint(VALUE hash, unsigned int index) { - return RHASH(hash)->ar_hint.ary[index]; + return RHASH_AR_TABLE(hash)->ar_hint.ary[index]; } static inline void ar_hint_set_hint(VALUE hash, unsigned int index, ar_hint_t hint) { - RHASH(hash)->ar_hint.ary[index] = hint; + RHASH_AR_TABLE(hash)->ar_hint.ary[index] = hint; } static inline void @@ -644,7 +644,7 @@ static unsigned ar_find_entry_hint(VALUE hash, ar_hint_t hint, st_data_t key) { unsigned i, bound = RHASH_AR_TABLE_BOUND(hash); - const ar_hint_t *hints = RHASH(hash)->ar_hint.ary; + const ar_hint_t *hints = RHASH_AR_TABLE(hash)->ar_hint.ary; /* if table is NULL, then bound also should be 0 */ @@ -1164,8 +1164,8 @@ ar_copy(VALUE hash1, VALUE hash2) new_tab = ar_alloc_table(hash1); } - memcpy(new_tab, old_tab, sizeof(ar_table)); - RHASH(hash1)->ar_hint.word = RHASH(hash2)->ar_hint.word; + *new_tab = *old_tab; + RHASH_AR_TABLE(hash1)->ar_hint.word = 
RHASH_AR_TABLE(hash2)->ar_hint.word; RHASH_AR_TABLE_BOUND_SET(hash1, RHASH_AR_TABLE_BOUND(hash2)); RHASH_AR_TABLE_SIZE_SET(hash1, RHASH_AR_TABLE_SIZE(hash2)); @@ -1425,10 +1425,12 @@ rb_hash_foreach(VALUE hash, rb_foreach_func *func, VALUE farg) } static VALUE -hash_alloc_flags(VALUE klass, VALUE flags, VALUE ifnone) +hash_alloc_flags(VALUE klass, VALUE flags, VALUE ifnone, bool st) { const VALUE wb = (RGENGC_WB_PROTECTED_HASH ? FL_WB_PROTECTED : 0); - NEWOBJ_OF(hash, struct RHash, klass, T_HASH | wb | flags, RHASH_SLOT_SIZE, 0); + const size_t size = sizeof(struct RHash) + (st ? sizeof(st_table) : sizeof(ar_table)); + + NEWOBJ_OF(hash, struct RHash, klass, T_HASH | wb | flags, size, 0); RHASH_SET_IFNONE((VALUE)hash, ifnone); @@ -1438,7 +1440,8 @@ hash_alloc_flags(VALUE klass, VALUE flags, VALUE ifnone) static VALUE hash_alloc(VALUE klass) { - return hash_alloc_flags(klass, 0, Qnil); + /* Allocate to be able to fit both st_table and ar_table. */ + return hash_alloc_flags(klass, 0, Qnil, sizeof(st_table) > sizeof(ar_table)); } static VALUE @@ -1467,13 +1470,13 @@ copy_compare_by_id(VALUE hash, VALUE basis) VALUE rb_hash_new_with_size(st_index_t size) { - VALUE ret = rb_hash_new(); - if (size == 0) { - /* do nothing */ - } - else if (size > RHASH_AR_TABLE_MAX_SIZE) { + bool st = size > RHASH_AR_TABLE_MAX_SIZE; + VALUE ret = hash_alloc_flags(rb_cHash, 0, Qnil, st); + + if (st) { hash_st_table_init(ret, &objhash, size); } + return ret; } @@ -1506,7 +1509,7 @@ hash_dup_with_compare_by_id(VALUE hash) static VALUE hash_dup(VALUE hash, VALUE klass, VALUE flags) { - return hash_copy(hash_alloc_flags(klass, flags, RHASH_IFNONE(hash)), + return hash_copy(hash_alloc_flags(klass, flags, RHASH_IFNONE(hash), !RHASH_EMPTY_P(hash) && RHASH_ST_TABLE_P(hash)), hash); } diff --git a/internal/hash.h b/internal/hash.h index c9a8b49c20..248a53429d 100644 --- a/internal/hash.h +++ b/internal/hash.h @@ -46,6 +46,10 @@ typedef struct ar_table_pair_struct { } ar_table_pair; typedef struct 
ar_table_struct { + union { + ar_hint_t ary[RHASH_AR_TABLE_MAX_SIZE]; + VALUE word; + } ar_hint; /* 64bit CPU: 8B * 2 * 8 = 128B */ ar_table_pair pairs[RHASH_AR_TABLE_MAX_SIZE]; } ar_table; @@ -53,20 +57,10 @@ typedef struct ar_table_struct { struct RHash { struct RBasic basic; const VALUE ifnone; - union { - ar_hint_t ary[RHASH_AR_TABLE_MAX_SIZE]; - VALUE word; - } ar_hint; }; #define RHASH(obj) ((struct RHash *)(obj)) -#ifndef MAX -# define MAX(a, b) (((a) > (b)) ? (a) : (b)) -#endif - -#define RHASH_SLOT_SIZE (sizeof(struct RHash) + MAX(sizeof(ar_table), sizeof(st_table))) - #ifdef RHASH_IFNONE # undef RHASH_IFNONE #endif diff --git a/test/ruby/test_gc_compact.rb b/test/ruby/test_gc_compact.rb index b1b964589a..221ca677e3 100644 --- a/test/ruby/test_gc_compact.rb +++ b/test/ruby/test_gc_compact.rb @@ -419,6 +419,8 @@ class TestGCCompact < Test::Unit::TestCase def test_moving_hashes_down_size_pools omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1 + # AR and ST hashes are in the same size pool on 32 bit + omit unless RbConfig::SIZEOF["uint64_t"] <= RbConfig::SIZEOF["void*"] assert_separately(%w[-robjspace], "#{<<~"begin;"}\n#{<<~"end;"}", timeout: 10, signal: :SEGV) begin; @@ -433,7 +435,6 @@ class TestGCCompact < Test::Unit::TestCase stats = GC.verify_compaction_references(expand_heap: true, toward: :empty) assert_operator(stats[:moved_down][:T_HASH], :>=, HASH_COUNT) - assert_include(ObjectSpace.dump(ary[0]), '"slot_size":' + GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE].to_s) end; end diff --git a/test/ruby/test_shapes.rb b/test/ruby/test_shapes.rb index d9cce4a337..ebc94a12d2 100644 --- a/test/ruby/test_shapes.rb +++ b/test/ruby/test_shapes.rb @@ -373,6 +373,8 @@ class TestShapes < Test::Unit::TestCase end def test_hash_has_correct_pool_shape + omit "SHAPE_IN_BASIC_FLAGS == 0" unless RbConfig::SIZEOF["uint64_t"] <= RbConfig::SIZEOF["void*"] + # All hashes are now allocated their own ar_table, so start in a # larger pool, and have already transitioned once. 
assert_shape_equal(RubyVM::Shape.root_shape, RubyVM::Shape.of({}).parent) -- cgit v1.2.1