author | Kjell Winblad <kjellwinblad@gmail.com> | 2019-04-16 16:33:01 +0200
---|---|---
committer | Kjell Winblad <kjellwinblad@gmail.com> | 2019-09-25 15:35:02 +0200
commit | ce7dbe8742e66f4632b5d39a9b4d7aa461e4f164 (patch) |
tree | 508c903fc327f3d8ca78f090c22ee63afc1336a7 |
parent | 4b01bd498cbf0dc30d448096661b331921e12a04 (diff) |
download | erlang-ce7dbe8742e66f4632b5d39a9b4d7aa461e4f164.tar.gz |
Use decentralized counters for ETS tables with write_concurrency
This commit enables decentralized counters by default in all public
ETS tables that have the `write_concurrency` option turned on. Tables
of type `ordered_set` already supported decentralized counters, so
this commit only affects tables of type `set`, `bag` and
`duplicate_bag`.

[Experiments][1] indicate that this change substantially improves
scalability in scenarios with frequent `ets:insert/2` and
`ets:delete/2` calls on ETS tables when many processor cores are
utilized.
[1]: http://winsh.me/ets_catree_benchmark/decent_ctrs_hash.html
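
As an illustration of the user-facing option this commit targets (a
minimal sketch; the module, table, and key names below are made up
for the example, not taken from the commit), a plain `set` table now
gets decentralized counters as soon as `write_concurrency` is
enabled:

```erlang
-module(decent_ctrs_demo).
-export([demo/0]).

%% Illustrative sketch only: a hash-based (type set) table that, with
%% this commit, uses decentralized size/memory counters because
%% write_concurrency is enabled.
demo() ->
    T = ets:new(example_tab, [set, public, {write_concurrency, true}]),
    true = ets:insert(T, {some_key, some_value}),
    %% ets:info/2 still reports correct values; only the internal
    %% bookkeeping of the counters changes.
    1 = ets:info(T, size),
    true = ets:delete(T).
```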
-rw-r--r-- | erts/emulator/beam/erl_alloc.types |   1
-rw-r--r-- | erts/emulator/beam/erl_bif_info.c  |  14
-rw-r--r-- | erts/emulator/beam/erl_db.c        |  21
-rw-r--r-- | erts/emulator/beam/erl_db_hash.c   | 227
-rw-r--r-- | erts/emulator/beam/erl_db_hash.h   |   7
-rw-r--r-- | erts/emulator/beam/erl_db_tree.c   |   7
-rw-r--r-- | erts/emulator/beam/erl_db_util.h   |   2
-rw-r--r-- | erts/emulator/beam/erl_flxctr.c    |  80
-rw-r--r-- | erts/emulator/beam/erl_flxctr.h    |  18
-rw-r--r-- | erts/emulator/beam/erl_process.h   |   1
-rw-r--r-- | lib/stdlib/test/ets_SUITE.erl      | 100
11 files changed, 362 insertions(+), 116 deletions(-)
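
For intuition about why this helps: a decentralized (striped) counter
spreads one logical count over several cache-line-separated cells so
that concurrent updates do not contend on a single memory word, while
reads sum the cells. The `counters` module exposes the same pattern at
the Erlang level, so the following sketch (an analogy for intuition
only, not the C implementation in the diff below) shows the idea:

```erlang
-module(striped_ctr_demo).
-export([demo/0]).

%% Analogy only: counters:new/2 with the write_concurrency option
%% spreads the logical counter over per-scheduler cells, just as the
%% diff below does for the ETS nitems and memory counters (plus a
%% per-lock nitems field used for grow/shrink decisions).
demo() ->
    C = counters:new(1, [write_concurrency]),
    ok = counters:add(C, 1, 5),
    ok = counters:sub(C, 1, 2),
    %% get/2 sums all cells: cheap writes, slightly costlier reads.
    3 = counters:get(C, 1).
```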
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 3e643a6223..c446860a78 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -195,6 +195,7 @@ type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info
 type DB_TERM ETS ETS db_term
 type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state
 type ETS_ALL_REQ SHORT_LIVED ETS ets_all_request
+type ETS_CTRS ETS ETS ets_decentralized_ctrs
 type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
 type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
 type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 43be78715b..0762876888 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -4053,7 +4053,16 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
             BIF_RET(am_notsup);
 #endif
         }
-
+        else if (ERTS_IS_ATOM_STR("flxctr_memory_usage", BIF_ARG_1)) {
+            Sint mem = erts_flxctr_debug_memory_usage();
+            if (mem == -1) {
+                BIF_RET(am_notsup);
+            } else {
+                Uint hsz = BIG_UWORD_HEAP_SIZE((UWord)mem);
+                Eterm *hp = HAlloc(BIF_P, hsz);
+                BIF_RET(uword_to_big((UWord)mem, hp));
+            }
+        }
     }
     else if (is_tuple(BIF_ARG_1)) {
         Eterm* tp = tuple_val(BIF_ARG_1);
@@ -4642,6 +4651,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
             flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
         else if (ERTS_IS_ATOM_STR("aux_work", BIF_ARG_2))
             flag = ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK;
+        else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_2))
+            flag = ERTS_DEBUG_WAIT_COMPLETED_THREAD_PROGRESS;
 
         if (flag && erts_debug_wait_completed(BIF_P, flag)) {
             ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
@@ -4723,7 +4734,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
     else if (ERTS_IS_ATOM_STR("ets_debug_random_split_join", BIF_ARG_1)) {
         if (is_tuple(BIF_ARG_2)) {
             Eterm* tpl = tuple_val(BIF_ARG_2);
-
             if (erts_ets_debug_random_split_join(tpl[1], tpl[2] == am_true))
                 BIF_RET(am_ok);
         }
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index a78e6b53ae..cfccb05219 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -406,9 +406,11 @@ static void
 free_dbtable(void *vtb)
 {
     DbTable *tb = (DbTable *) vtb;
+    erts_flxctr_add(&tb->common.counters,
+                    ERTS_DB_TABLE_MEM_COUNTER_ID,
+                    -((Sint)erts_flxctr_nr_of_allocated_bytes(&tb->common.counters)));
     ASSERT(erts_flxctr_is_snapshot_ongoing(&tb->common.counters) ||
-           sizeof(DbTable) == erts_flxctr_read_approx(&tb->common.counters,
-                                                      ERTS_DB_TABLE_MEM_COUNTER_ID));
+           sizeof(DbTable) == DB_GET_APPROX_MEM_CONSUMED(tb));
 
     erts_rwmtx_destroy(&tb->common.rwlock);
     erts_mtx_destroy(&tb->common.fixlock);
@@ -417,7 +419,7 @@ free_dbtable(void *vtb)
     if (tb->common.btid)
         erts_bin_release(tb->common.btid);
 
-    erts_flxctr_destroy(&tb->common.counters, ERTS_ALC_T_DB_TABLE);
+    erts_flxctr_destroy(&tb->common.counters, ERTS_ALC_T_ETS_CTRS);
     erts_free(ERTS_ALC_T_DB_TABLE, tb);
 }
 
@@ -1742,16 +1744,17 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
      */
     {
         DbTable init_tb;
-        erts_flxctr_init(&init_tb.common.counters, 0, 2, ERTS_ALC_T_DB_TABLE);
+        erts_flxctr_init(&init_tb.common.counters, 0, 2, ERTS_ALC_T_ETS_CTRS);
         tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
                                       &init_tb, sizeof(DbTable));
         erts_flxctr_init(&tb->common.counters,
-                         status & DB_CA_ORDERED_SET,
+                         status & DB_FINE_LOCKED,
                          2,
-                         ERTS_ALC_T_DB_TABLE);
+                         ERTS_ALC_T_ETS_CTRS);
        erts_flxctr_add(&tb->common.counters,
                        ERTS_DB_TABLE_MEM_COUNTER_ID,
-                       DB_GET_APPROX_MEM_CONSUMED(&init_tb));
+                       DB_GET_APPROX_MEM_CONSUMED(&init_tb)
+                       + erts_flxctr_nr_of_allocated_bytes(&tb->common.counters));
     }
     tb->common.meth = meth;
@@ -3349,7 +3352,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
     if (!is_ctrs_read_result_set) {
         ErtsFlxCtrSnapshotResult res =
-            erts_flxctr_snapshot(&tb->common.counters, ERTS_ALC_T_DB_TABLE, BIF_P);
+            erts_flxctr_snapshot(&tb->common.counters, ERTS_ALC_T_ETS_CTRS, BIF_P);
         if (ERTS_FLXCTR_GET_RESULT_AFTER_TRAP == res.type) {
             Eterm tuple;
             db_unlock(tb, LCK_READ);
@@ -3426,7 +3429,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
     }
     if (BIF_ARG_2 == am_size || BIF_ARG_2 == am_memory) {
         ErtsFlxCtrSnapshotResult res =
-            erts_flxctr_snapshot(&tb->common.counters, ERTS_ALC_T_DB_TABLE, BIF_P);
+            erts_flxctr_snapshot(&tb->common.counters, ERTS_ALC_T_ETS_CTRS, BIF_P);
         if (ERTS_FLXCTR_GET_RESULT_AFTER_TRAP == res.type) {
             db_unlock(tb, LCK_READ);
             BIF_TRAP2(&bif_trap_export[BIF_ets_info_2], BIF_P, res.trap_resume_state, BIF_ARG_2);
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 2bcdb47a54..399d0d5155 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -85,12 +85,44 @@
 
 #include "erl_db_hash.h"
 
-#define ADD_NITEMS(DB, TO_ADD) \
-    erts_flxctr_add(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID, TO_ADD)
-#define INC_NITEMS(DB) \
-    erts_flxctr_inc_read_centralized(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
-#define DEC_NITEMS(DB) \
-    erts_flxctr_dec_read_centralized(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+#define IS_DECENTRALIZED_CTRS(DB) ((DB)->common.counters.is_decentralized)
+#define NITEMS_ESTIMATE(DB, LCK_CTR, HASH) \
+    (IS_DECENTRALIZED_CTRS(DB) ? \
+     (DB_HASH_LOCK_CNT * (LCK_CTR != NULL ? LCK_CTR->nitems : GET_LOCK_AND_CTR(DB,HASH)->nitems)) : \
+     erts_flxctr_read_centralized(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID))
+#define ADD_NITEMS(DB, LCK_CTR, HASH, TO_ADD) \
+    do { \
+        if (IS_DECENTRALIZED_CTRS(DB)) { \
+            if (LCK_CTR != NULL) { \
+                LCK_CTR->nitems += TO_ADD; \
+            } else { \
+                GET_LOCK_AND_CTR(DB,HASH)->nitems += TO_ADD; \
+            } \
+        } \
+        erts_flxctr_add(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID, TO_ADD); \
+    } while(0)
+#define INC_NITEMS(DB, LCK_CTR, HASH) \
+    do { \
+        if (IS_DECENTRALIZED_CTRS(DB)) { \
+            if (LCK_CTR != NULL) { \
+                LCK_CTR->nitems++; \
+            } else { \
+                GET_LOCK_AND_CTR(DB,HASH)->nitems++; \
+            } \
+        } \
+        erts_flxctr_inc(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID); \
+    } while(0)
+#define DEC_NITEMS(DB, LCK_CTR, HASH) \
+    do { \
+        if (IS_DECENTRALIZED_CTRS(DB)) { \
+            if (LCK_CTR != NULL) { \
+                LCK_CTR->nitems--; \
+            } else { \
+                GET_LOCK_AND_CTR(DB,HASH)->nitems--; \
+            } \
+        } \
+        erts_flxctr_dec(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID); \
+    } while(0)
 #define RESET_NITEMS(DB) \
     erts_flxctr_reset(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
 
@@ -127,9 +159,6 @@
      : ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)))
 #endif
 #define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive))
-#define NITEMS(tb) \
-    ((Sint)erts_flxctr_read_centralized(&(tb)->common.counters, \
-                                        ERTS_DB_TABLE_NITEMS_COUNTER_ID))
 
 #define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
 
@@ -227,7 +256,8 @@ static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
                               make_internal_hash(term, 0)) & MAX_HASH_MASK)
 
 #  define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
-#  define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
+#  define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck_ctr.lck)
+#  define GET_LOCK_AND_CTR(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck_ctr)
 #  define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
 
 /* Fine grained read lock */
@@ -255,6 +285,20 @@ static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
     }
 }
 
+/* Fine grained write lock */
+static ERTS_INLINE
+DbTableHashLockAndCounter* WLOCK_HASH_GET_LCK_AND_CTR(DbTableHash* tb, HashValue hval)
+{
+    if (tb->common.is_thread_safe) {
+        return NULL;
+    } else {
+        DbTableHashLockAndCounter* lck_ctr = GET_LOCK_AND_CTR(tb,hval);
+        ASSERT(tb->common.type & DB_FINE_LOCKED);
+        erts_rwmtx_rwlock(&lck_ctr->lck);
+        return lck_ctr;
+    }
+}
+
 static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck)
 {
     if (lck != NULL) {
@@ -269,6 +313,13 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck)
     }
 }
 
+static ERTS_INLINE void WUNLOCK_HASH_LCK_CTR(DbTableHashLockAndCounter* lck_ctr)
+{
+    if (lck_ctr != NULL) {
+        erts_rwmtx_rwunlock(&lck_ctr->lck);
+    }
+}
+
 #ifdef ERTS_ENABLE_LOCK_CHECK
 #  define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd))
 
@@ -477,9 +528,8 @@ db_get_binary_info_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);
 static int db_raw_first_hash(Process* p, DbTable *tbl, Eterm *ret);
 static int db_raw_next_hash(Process* p, DbTable *tbl, Eterm key, Eterm *ret);
 
-static ERTS_INLINE void try_shrink(DbTableHash* tb)
+static ERTS_INLINE void try_shrink(DbTableHash* tb, Sint nitems)
 {
-    int nitems = NITEMS(tb);
     if (nitems < SHRINK_LIMIT(tb) && !IS_FIXED(tb)) {
         shrink(tb, nitems);
     }
@@ -717,8 +767,9 @@ int db_create_hash(Process *p, DbTable *tbl)
                                               (DbTable *) tb,
                                               sizeof(DbTableHashFineLocks));
         for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
-            erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+            erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck_ctr.lck, &rwmtx_opt,
                 "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+            tb->locks->lck_vec[i].lck_ctr.nitems = 0;
         }
         /* This important property is needed to guarantee the two buckets
          * involved in a grow/shrink operation it protected by the same lock:
@@ -807,13 +858,13 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
     HashDbTerm** bp;
     HashDbTerm* b;
     HashDbTerm* q;
-    erts_rwmtx_t* lck;
-    int nitems;
+    DbTableHashLockAndCounter* lck_ctr;
+    Sint nitems;
     int ret = DB_ERROR_NONE;
 
     key = GETKEY(tb, tuple_val(obj));
     hval = MAKE_HASH(key);
-    lck = WLOCK_HASH(tb, hval);
+    lck_ctr = WLOCK_HASH_GET_LCK_AND_CTR(tb, hval);
     ix = hash_to_ix(tb, hval);
     bp = &BUCKET(tb, ix);
     b = *bp;
@@ -833,7 +884,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
         if (tb->common.status & DB_SET) {
             HashDbTerm* bnext = b->next;
             if (is_pseudo_deleted(b)) {
-                INC_NITEMS(tb);
+                INC_NITEMS(tb, lck_ctr, hval);
                 b->pseudo_deleted = 0;
             }
             else if (key_clash_fail) {
@@ -862,7 +913,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
         do {
             if (db_eq(&tb->common,obj,&q->dbterm)) {
                 if (is_pseudo_deleted(q)) {
-                    INC_NITEMS(tb);
+                    INC_NITEMS(tb, lck_ctr, hval);
                     q->pseudo_deleted = 0;
                     ASSERT(q->hvalue == hval);
                     if (q != b) { /* must move to preserve key insertion order */
@@ -885,10 +936,11 @@ Lnew:
     q->pseudo_deleted = 0;
     q->next = b;
     *bp = q;
-    nitems = INC_NITEMS(tb);
-    WUNLOCK_HASH(lck);
+    INC_NITEMS(tb, lck_ctr, hval);
+    nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
+    WUNLOCK_HASH_LCK_CTR(lck_ctr);
     {
-	int nactive = NACTIVE(tb);
+        int nactive = NACTIVE(tb);
         if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
             grow(tb, nitems);
         }
@@ -896,7 +948,7 @@ Lnew:
     return DB_ERROR_NONE;
 
 Ldone:
-    WUNLOCK_HASH(lck);
+    WUNLOCK_HASH_LCK_CTR(lck_ctr);
     return ret;
 }
 
@@ -1050,11 +1102,11 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
     HashDbTerm** bp;
    HashDbTerm* b;
     HashDbTerm* free_us = NULL;
-    erts_rwmtx_t* lck;
+    DbTableHashLockAndCounter* lck_ctr;
     int nitems_diff = 0;
-
+    Sint nitems;
     hval = MAKE_HASH(key);
-    lck = WLOCK_HASH(tb,hval);
+    lck_ctr = WLOCK_HASH_GET_LCK_AND_CTR(tb,hval);
     ix = hash_to_ix(tb, hval);
     bp = &BUCKET(tb, ix);
     b = *bp;
@@ -1081,10 +1133,13 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
             bp = &b->next;
             b = b->next;
     }
-    WUNLOCK_HASH(lck);
     if (nitems_diff) {
-        ADD_NITEMS(tb, nitems_diff);
-        try_shrink(tb);
+        ADD_NITEMS(tb, lck_ctr, hval, nitems_diff);
+        nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
+    }
+    WUNLOCK_HASH_LCK_CTR(lck_ctr);
+    if (nitems_diff) {
+        try_shrink(tb, nitems);
     }
     free_term_list(tb, free_us);
     *ret = am_true;
@@ -1102,14 +1157,15 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
     HashDbTerm** bp;
     HashDbTerm* b;
     HashDbTerm* free_us = NULL;
-    erts_rwmtx_t* lck;
+    DbTableHashLockAndCounter* lck_ctr;
     int nitems_diff = 0;
+    Sint nitems;
     int nkeys = 0;
     Eterm key;
 
     key = GETKEY(tb, tuple_val(object));
     hval = MAKE_HASH(key);
-    lck = WLOCK_HASH(tb,hval);
+    lck_ctr = WLOCK_HASH_GET_LCK_AND_CTR(tb,hval);
     ix = hash_to_ix(tb, hval);
     bp = &BUCKET(tb, ix);
     b = *bp;
@@ -1142,10 +1198,13 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
             bp = &b->next;
             b = b->next;
     }
-    WUNLOCK_HASH(lck);
     if (nitems_diff) {
-        ADD_NITEMS(tb, nitems_diff);
-        try_shrink(tb);
+        ADD_NITEMS(tb, lck_ctr, hval, nitems_diff);
+        nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
+    }
+    WUNLOCK_HASH_LCK_CTR(lck_ctr);
+    if (nitems_diff) {
+        try_shrink(tb, nitems);
     }
     free_term_list(tb, free_us);
     *ret = am_true;
@@ -2032,9 +2091,11 @@ static int select_delete_on_match_res(traverse_context_t* ctx_base, Sint slot_ix,
     HashDbTerm** current_ptr = *current_ptr_ptr;
     select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
     HashDbTerm* del;
+    DbTableHashLockAndCounter* lck_ctr;
+    Uint32 hval;
     if (match_res != am_true)
         return 0;
-
+    hval = (*current_ptr)->hvalue;
     if (NFIXED(ctx->base.tb) > ctx->fixated_by_me) { /* fixated by others? */
         if (slot_ix != ctx->last_pseudo_delete) {
             if (!add_fixed_deletion(ctx->base.tb, slot_ix, ctx->fixated_by_me))
@@ -2050,23 +2111,58 @@
         del->next = ctx->free_us;
         ctx->free_us = del;
     }
-    DEC_NITEMS(ctx->base.tb);
+    lck_ctr = GET_LOCK_AND_CTR(ctx->base.tb,slot_ix);
+    DEC_NITEMS(ctx->base.tb, lck_ctr, hval);
     return 1;
 }
 
+/* This function is only safe to call while the table lock is held in
+   write mode */
+static Sint get_nitems_from_locks_or_counter(DbTableHash* tb)
+{
+    if (IS_DECENTRALIZED_CTRS(tb)) {
+        int i;
+        Sint total = 0;
+        for (i=0; i < DB_HASH_LOCK_CNT; ++i) {
+            total += tb->locks->lck_vec[i].lck_ctr.nitems;
+        }
+        return total;
+    } else {
+        return erts_flxctr_read_centralized(&tb->common.counters,
+                                            ERTS_DB_TABLE_NITEMS_COUNTER_ID);
+    }
+}
+
 static int select_delete_on_loop_ended(traverse_context_t* ctx_base,
                                        Sint slot_ix, Sint got,
                                        Sint iterations_left,
                                        Binary** mpp, Eterm* ret)
 {
     select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
-    free_term_list(ctx->base.tb, ctx->free_us);
+    DbTableHash* tb = ctx->base.tb;
+    free_term_list(tb, ctx->free_us);
     ctx->free_us = NULL;
     ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS);
     BUMP_REDS(ctx->base.p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
     if (got) {
-        try_shrink(ctx->base.tb);
+        Sint nitems;
+        if (IS_DECENTRALIZED_CTRS(tb)) {
+            /* Get a random hash value so we can get an nitems
+               estimate from a random lock */
+            HashValue hval =
+                (HashValue)&ctx +
+                (HashValue)iterations_left +
+                (HashValue)erts_get_scheduler_data()->reductions;
+            erts_rwmtx_t* lck = RLOCK_HASH(tb, hval);
+            DbTableHashLockAndCounter* lck_ctr = GET_LOCK_AND_CTR(tb, hval);
+            nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
+            RUNLOCK_HASH(lck);
+        } else {
+            nitems = erts_flxctr_read_centralized(&tb->common.counters,
+                                                  ERTS_DB_TABLE_NITEMS_COUNTER_ID);
+        }
+        try_shrink(tb, nitems);
     }
     *ret = erts_make_integer(got, ctx->base.p);
     return DB_ERROR_NONE;
@@ -2297,9 +2393,10 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
     HashDbTerm **bp, *b;
     HashDbTerm *free_us = NULL;
     HashValue hval = MAKE_HASH(key);
-    erts_rwmtx_t *lck = WLOCK_HASH(tb, hval);
+    DbTableHashLockAndCounter *lck_ctr = WLOCK_HASH_GET_LCK_AND_CTR(tb, hval);
     int ix = hash_to_ix(tb, hval);
     int nitems_diff = 0;
+    Sint nitems;
 
     *ret = NIL;
     for (bp = &BUCKET(tb, ix), b = *bp; b; bp = &b->next, b = b->next) {
@@ -2325,10 +2422,13 @@
             break;
         }
     }
-    WUNLOCK_HASH(lck);
     if (nitems_diff) {
-        ADD_NITEMS(tb, nitems_diff);
-        try_shrink(tb);
+        ADD_NITEMS(tb, lck_ctr, hval, nitems_diff);
+        nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
+    }
+    WUNLOCK_HASH_LCK_CTR(lck_ctr);
+    if (nitems_diff) {
+        try_shrink(tb, nitems);
     }
     free_term_list(tb, free_us);
     return DB_ERROR_NONE;
@@ -2452,7 +2552,7 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
 
 static int db_free_empty_table_hash(DbTable *tbl)
 {
-    ASSERT(NITEMS(tbl) == 0);
+    ASSERT(get_nitems_from_locks_or_counter(&tbl->hash) == 0);
     while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0)
         ;
     return 0;
@@ -2495,8 +2595,11 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
                      (void*)tb->locks, sizeof(DbTableHashFineLocks));
         tb->locks = NULL;
     }
-    ASSERT(sizeof(DbTable) == erts_flxctr_read_approx(&tb->common.counters,
-                                                      ERTS_DB_TABLE_MEM_COUNTER_ID));
+    ASSERT(erts_flxctr_is_snapshot_ongoing(&tb->common.counters) ||
+           ((sizeof(DbTable) +
+             erts_flxctr_nr_of_allocated_bytes(&tb->common.counters)) ==
+            erts_flxctr_read_approx(&tb->common.counters,
+                                    ERTS_DB_TABLE_MEM_COUNTER_ID)));
     return reds;			/* Done */
 }
 
@@ -3159,13 +3262,13 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
     DbTableHash *tb = &tbl->hash;
     HashValue hval;
     HashDbTerm **bp, *b;
-    erts_rwmtx_t* lck;
+    DbTableHashLockAndCounter* lck_ctr;
     int flags = 0;
 
     ASSERT(tb->common.status & DB_SET);
 
     hval = MAKE_HASH(key);
-    lck = WLOCK_HASH(tb, hval);
+    lck_ctr = WLOCK_HASH_GET_LCK_AND_CTR(tb, hval);
     bp = &BUCKET(tb, hash_to_ix(tb, hval));
     b = *bp;
 
@@ -3184,7 +3287,7 @@
     }
 
     if (obj == THE_NON_VALUE) {
-        WUNLOCK_HASH(lck);
+        WUNLOCK_HASH_LCK_CTR(lck_ctr);
         return 0;
     }
 
@@ -3217,7 +3320,7 @@
             ASSERT(q->hvalue == hval);
             q->pseudo_deleted = 0;
             *bp = b = q;
-            INC_NITEMS(tb);
+            INC_NITEMS(tb, lck_ctr, hval);
         }
 
         HRelease(p, hend, htop);
@@ -3230,7 +3333,7 @@ Ldone:
     handle->dbterm = &b->dbterm;
     handle->flags = flags;
     handle->new_size = b->dbterm.size;
-    handle->u.hash.lck = lck;
+    handle->u.hash.lck_ctr = lck_ctr;
     return 1;
 }
 
@@ -3243,10 +3346,12 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
     DbTableHash *tb = &tbl->hash;
     HashDbTerm **bp = (HashDbTerm **) handle->bp;
     HashDbTerm *b = *bp;
-    erts_rwmtx_t* lck = handle->u.hash.lck;
+    Uint32 hval = b->hvalue;
+    DbTableHashLockAndCounter* lck_ctr = handle->u.hash.lck_ctr;
     HashDbTerm* free_me = NULL;
+    Sint nitems;
 
-    ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck));  /* locked by db_lookup_dbterm_hash */
+    ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, &lck_ctr->lck));  /* locked by db_lookup_dbterm_hash */
 
     ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress
                                                && handle->flags & DB_MUST_RESIZE));
@@ -3258,10 +3363,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
             *bp = b->next;
             free_me = b;
         }
-
-        WUNLOCK_HASH(lck);
-        DEC_NITEMS(tb);
-        try_shrink(tb);
+        DEC_NITEMS(tb, lck_ctr, hval);
+        nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
+        WUNLOCK_HASH_LCK_CTR(lck_ctr);
+        try_shrink(tb, nitems);
     } else {
         if (handle->flags & DB_MUST_RESIZE) {
             db_finalize_resize(handle, offsetof(HashDbTerm,dbterm));
@@ -3269,15 +3374,17 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
         }
         if (handle->flags & DB_INC_TRY_GROW) {
             int nactive;
-            int nitems = INC_NITEMS(tb);
-            WUNLOCK_HASH(lck);
+            int nitems;
+            INC_NITEMS(tb, lck_ctr, hval);
+            nitems = NITEMS_ESTIMATE(tb, lck_ctr, hval);
+            WUNLOCK_HASH_LCK_CTR(lck_ctr);
             nactive = NACTIVE(tb);
 
             if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
                 grow(tb, nitems);
             }
         } else {
-            WUNLOCK_HASH(lck);
+            WUNLOCK_HASH_LCK_CTR(lck_ctr);
         }
     }
 
@@ -3296,9 +3403,7 @@ static SWord db_delete_all_objects_hash(Process* p,
                                         Eterm* nitems_holder_wb)
 {
     if (nitems_holder_wb != NULL) {
-        Uint nr_of_items =
-            erts_flxctr_read_centralized(&tbl->common.counters,
-                                         ERTS_DB_TABLE_NITEMS_COUNTER_ID);
+        Uint nr_of_items = get_nitems_from_locks_or_counter(&tbl->hash);
         *nitems_holder_wb = erts_make_integer(nr_of_items, p);
     }
     if (IS_FIXED(tbl)) {
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index b26b82056f..d06ffb10b8 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -53,9 +53,14 @@ typedef struct hash_db_term {
 #define DB_HASH_LOCK_CNT 64
 #endif
 
+typedef struct DbTableHashLockAndCounter {
+    Uint nitems;
+    erts_rwmtx_t lck;
+} DbTableHashLockAndCounter;
+
 typedef struct db_table_hash_fine_locks {
     union {
-	erts_rwmtx_t lck;
+        DbTableHashLockAndCounter lck_ctr;
 	byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))];
     }lck_vec[DB_HASH_LOCK_CNT];
 } DbTableHashFineLocks;
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 954fe1dcaf..67eb511681 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -2307,9 +2307,12 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
                      sizeof(TreeDbTerm *) * STACK_NEED);
         ASSERT(erts_flxctr_is_snapshot_ongoing(&tb->common.counters) ||
                ((APPROX_MEM_CONSUMED(tb)
-                 == sizeof(DbTable)) ||
+                 == (sizeof(DbTable) +
+                     erts_flxctr_nr_of_allocated_bytes(&tb->common.counters))) ||
                 (APPROX_MEM_CONSUMED(tb)
-                 == (sizeof(DbTable) + sizeof(DbFixation)))));
+                 == (sizeof(DbTable) +
+                     sizeof(DbFixation) +
+                     erts_flxctr_nr_of_allocated_bytes(&tb->common.counters)))));
     }
     return reds;
 }
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index f038dba89a..4a87956c99 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -92,7 +92,7 @@ typedef struct {
     int flags;
     union {
         struct {
-            erts_rwmtx_t* lck;
+            struct DbTableHashLockAndCounter* lck_ctr;
         } hash;
         struct {
             struct DbTableCATreeNode* base_node;
diff --git a/erts/emulator/beam/erl_flxctr.c b/erts/emulator/beam/erl_flxctr.c
index 35f4a21508..35c4de1a27 100644
--- a/erts/emulator/beam/erl_flxctr.c
+++ b/erts/emulator/beam/erl_flxctr.c
@@ -46,6 +46,29 @@ typedef enum {
     ERTS_FLXCTR_SNAPSHOT_ONGOING_TP_THREAD_DO_FREE = 2
 } erts_flxctr_snapshot_status;
 
+#define ERTS_FLXCTR_DECENTRALIZED_COUNTER_ARRAY_SIZE        \
+    (sizeof(ErtsFlxCtrDecentralizedCtrArray) +              \
+     (sizeof(ErtsFlxCtrDecentralizedCtrArrayElem) *         \
+      ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS) +                 \
+     ERTS_CACHE_LINE_SIZE)
+
+#ifdef DEBUG
+#define FLXCTR_MEM_DEBUG 1
+#endif
+
+#ifdef FLXCTR_MEM_DEBUG
+static erts_atomic_t debug_mem_usage;
+#endif
+
+#ifdef FLXCTR_MEM_DEBUG
+#define FLXCTR_FREE(ALLOC_TYPE, ADDRESS) do {                           \
+        erts_free(ALLOC_TYPE, ADDRESS);                                 \
+        erts_atomic_add_mb(&debug_mem_usage, -ERTS_FLXCTR_DECENTRALIZED_COUNTER_ARRAY_SIZE); \
+    } while(0)
+#else
+#define FLXCTR_FREE(ALLOC_TYPE, ADDRESS) erts_free(ALLOC_TYPE, ADDRESS)
+#endif
+
 static void
 thr_prg_wake_up_and_count(void* bin_p)
 {
@@ -72,13 +95,13 @@ thr_prg_wake_up_and_count(void* bin_p)
     }
     /* Announce that the snapshot is done */
     {
-	Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
-	if (expected != erts_atomic_cmpxchg_mb(&next->snapshot_status,
-					       ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING,
-					       expected)) {
-	    /* The CAS failed which means that this thread need to free the next array. */
-	    erts_free(info->alloc_type, next->block_start);
-	}
+        Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
+        if (expected != erts_atomic_cmpxchg_mb(&next->snapshot_status,
+                                               ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING,
+                                               expected)) {
+            /* The CAS failed which means that this thread need to free the next array. */
+            FLXCTR_FREE(info->alloc_type, next->block_start);
+        }
     }
     /* Resume the process that requested the snapshot */
     erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
@@ -86,7 +109,7 @@ thr_prg_wake_up_and_count(void* bin_p)
         erts_resume(p, ERTS_PROC_LOCK_STATUS);
     }
     /* Free the memory that is no longer needed */
-    erts_free(info->alloc_type, array->block_start);
+    FLXCTR_FREE(info->alloc_type, array->block_start);
     erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
     erts_proc_dec_refc(p);
     erts_bin_release(bin);
@@ -141,6 +164,14 @@ static void suspend_until_thr_prg(Process* p)
     erts_schedule_thr_prgr_later_op(thr_prg_wake_up_later, state_bin, &info->later_op);
 }
 
+size_t erts_flxctr_nr_of_allocated_bytes(ErtsFlxCtr* c)
+{
+    if (c->is_decentralized) {
+        return ERTS_FLXCTR_DECENTRALIZED_COUNTER_ARRAY_SIZE;
+    } else {
+        return 0;
+    }
+}
 
 static ErtsFlxCtrDecentralizedCtrArray*
 create_decentralized_ctr_array(ErtsAlcType_t alloc_type, Uint nr_of_counters) {
@@ -148,14 +179,14 @@ create_decentralized_ctr_array(ErtsAlcType_t alloc_type, Uint nr_of_counters) {
        the array field is located at the start of a cache line */
     char* bytes =
         erts_alloc(alloc_type,
-                   sizeof(ErtsFlxCtrDecentralizedCtrArray) +
-                   (sizeof(ErtsFlxCtrDecentralizedCtrArrayElem) *
-                    ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS) +
-                   ERTS_CACHE_LINE_SIZE);
+                   ERTS_FLXCTR_DECENTRALIZED_COUNTER_ARRAY_SIZE);
     void* block_start = bytes;
     int bytes_to_next_cacheline_border;
     ErtsFlxCtrDecentralizedCtrArray* array;
     int i, sched;
+#ifdef FLXCTR_MEM_DEBUG
+    erts_atomic_add_mb(&debug_mem_usage, ERTS_FLXCTR_DECENTRALIZED_COUNTER_ARRAY_SIZE);
+#endif
     bytes = &bytes[offsetof(ErtsFlxCtrDecentralizedCtrArray, array)];
     bytes_to_next_cacheline_border = ERTS_CACHE_LINE_SIZE -
         (((Uint)bytes) % ERTS_CACHE_LINE_SIZE);
@@ -178,6 +209,9 @@
 void erts_flxctr_setup(int decentralized_counter_groups)
 {
     reader_groups_array_size = decentralized_counter_groups+1;
+#ifdef FLXCTR_MEM_DEBUG
+    erts_atomic_init_mb(&debug_mem_usage, 0);
+#endif
 }
 
 void erts_flxctr_init(ErtsFlxCtr* c,
@@ -203,7 +237,7 @@ void erts_flxctr_init(ErtsFlxCtr* c,
     }
 }
 
-void erts_flxctr_destroy(ErtsFlxCtr* c, ErtsAlcType_t type)
+void erts_flxctr_destroy(ErtsFlxCtr* c, ErtsAlcType_t alloc_type)
 {
     if (c->is_decentralized) {
         if (erts_flxctr_is_snapshot_ongoing(c)) {
@@ -220,10 +254,10 @@ void erts_flxctr_destroy(ErtsFlxCtr* c, ErtsAlcType_t type)
                snapshot is ongoing anymore and the freeing needs
               to be done here */
             ERTS_ASSERT(!erts_flxctr_is_snapshot_ongoing(c));
-            erts_free(type, array->block_start);
+            FLXCTR_FREE(alloc_type, array->block_start);
        }
     } else {
-        erts_free(type, ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c)->block_start);
+        FLXCTR_FREE(alloc_type, ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c)->block_start);
     }
 }
 
@@ -257,7 +291,7 @@ erts_flxctr_snapshot(ErtsFlxCtr* c,
             ErtsFlxCtrSnapshotResult res =
                 {.type = ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP};
             suspend_until_thr_prg(p);
-            erts_free(alloc_type, new_array->block_start);
+            FLXCTR_FREE(alloc_type, new_array->block_start);
             return res;
         }
         /* Create binary with info about the operation that can be
@@ -364,7 +398,19 @@ void erts_flxctr_reset(ErtsFlxCtr* c,
 }
 
 
-void erts_flxctr_set_slot(int group) {
+void erts_flxctr_set_slot(int group)
+{
     ErtsSchedulerData *esdp = erts_get_scheduler_data();
     esdp->flxctr_slot_no = group;
 }
+
+Sint erts_flxctr_debug_memory_usage(void)
+{
+#ifdef FLXCTR_MEM_DEBUG
+    return erts_atomic_read_mb(&debug_mem_usage);
+#else
+    return -1;
+#endif
+}
+
+
diff --git a/erts/emulator/beam/erl_flxctr.h b/erts/emulator/beam/erl_flxctr.h
index 5cab02b9eb..df60f3651e 100644
--- a/erts/emulator/beam/erl_flxctr.h
+++ b/erts/emulator/beam/erl_flxctr.h
@@ -288,6 +288,24 @@ int erts_flxctr_is_snapshot_ongoing(ErtsFlxCtr* c);
  */
 int erts_flxctr_suspend_until_thr_prg_if_snapshot_ongoing(ErtsFlxCtr* c, Process* p);
 
+/**
+ * @brief This function returns the number of bytes that are allocated
+ * for the given FlxCtr.
+ *
+ * @return nr of bytes allocated for the FlxCtr
+ */
+size_t erts_flxctr_nr_of_allocated_bytes(ErtsFlxCtr* c);
+
+/**
+ * @brief This debug function returns the amount of memory allocated
+ * for decentralized counter arrays when compiled with the DEBUG
+ * macro. The function returns -1 if the DEBUG macro is undefined.
+ *
+ * @return number of bytes allocated for decentralized counter arrays
+ * if in debug mode and otherwise -1
+ */
+Sint erts_flxctr_debug_memory_usage(void);
+
 /* End: Public Interface */
 
 /* Internal Declarations */
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 09a6c0e961..c8bb7f9186 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1866,6 +1866,7 @@ Uint erts_debug_nbalance(void);
 #define ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS (1 << 0)
 #define ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS (1 << 1)
 #define ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK (1 << 2)
+#define ERTS_DEBUG_WAIT_COMPLETED_THREAD_PROGRESS (1 << 3)
 
 int erts_debug_wait_completed(Process *c_p, int flags);
 
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index b1813052d2..e992397519 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -44,7 +44,7 @@
          t_delete_all_objects/1, t_insert_list/1, t_test_ms/1,
          t_select_delete/1,t_select_replace/1,t_select_replace_next_bug/1,t_ets_dets/1]).
 -export([test_table_size_concurrency/1,test_table_memory_concurrency/1,
-         test_delete_table_while_size_snapshot/1, test_delete_table_while_size_snapshot_helper/0]).
+         test_delete_table_while_size_snapshot/1, test_delete_table_while_size_snapshot_helper/1]).
 
 -export([ordered/1, ordered_match/1, interface_equality/1,
          fixtable_next/1, fixtable_iter_bag/1,
@@ -2692,8 +2692,14 @@ write_concurrency(Config) when is_list(Config) ->
     true = YesMem > NoHashMem,
     true = YesMem > NoTreeMem,
     true = YesMem > YesTreeMem,
-    true = YesTreeMem < NoTreeMem,
-
+    %% The amount of memory used by ordered_set with write_concurrency
+    %% enabled depend on the number of schedulers due its use of
+    %% decentralized counters
+    case erlang:system_info(schedulers) of
+        N when N =< 4 ->
+            true = YesTreeMem < NoTreeMem;
+        _ -> ok
+    end,
     {'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency,foo}])),
     {'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency}])),
     {'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency,true,foo}])),
@@ -4696,7 +4702,10 @@ size_loop(_T, 0, _, _) ->
 size_loop(T, I, PrevSize, WhatToTest) ->
     Size = ets:info(T, WhatToTest),
     case Size < PrevSize of
-        true -> ct:fail("Bad ets:info/2");
+        true ->
+            io:format("Bad ets:info/2 (got ~p expected >=~p)",
+                      [Size, PrevSize]),
+            ct:fail("Bad ets:info/2)");
         _ -> ok
     end,
     size_loop(T, I -1, Size, WhatToTest).
@@ -4708,13 +4717,17 @@ add_loop(T, I) ->
     add_loop(T, I -1).
 
 
-test_table_counter_concurrency(WhatToTest) ->
+test_table_counter_concurrency(WhatToTest, TableOptions) ->
     IntStatePrevOn =
         erts_debug:set_internal_state(available_internal_state, true),
     ItemsToAdd = 1000000,
     SizeLoopSize = 1000,
-    T = ets:new(k, [public, ordered_set, {write_concurrency, true}]),
-    erts_debug:set_internal_state(ets_debug_random_split_join, {T, false}),
+    T = ets:new(k, TableOptions),
+    case lists:member(ordered_set, TableOptions) of
+        true ->
+            erts_debug:set_internal_state(ets_debug_random_split_join, {T, false});
+        false -> ok
+    end,
     0 = ets:info(T, size),
     P = self(),
     SpawnedSizeProcs =
@@ -4742,10 +4755,14 @@
     ok.
 
 test_table_size_concurrency(Config) when is_list(Config) ->
-    test_table_counter_concurrency(size).
+    BaseOptions = [public, {write_concurrency, true}],
+    test_table_counter_concurrency(size, [set | BaseOptions]),
+    test_table_counter_concurrency(size, [ordered_set | BaseOptions]).
 
 test_table_memory_concurrency(Config) when is_list(Config) ->
-    test_table_counter_concurrency(memory).
+    BaseOptions = [public, {write_concurrency, true}],
+    test_table_counter_concurrency(memory, [set | BaseOptions]),
+    test_table_counter_concurrency(memory, [ordered_set | BaseOptions]).
 
 %% Tests that calling the ets:delete operation on a table T with
 %% decentralized counters works while ets:info(T, size) operations are
@@ -4755,15 +4772,19 @@ test_delete_table_while_size_snapshot(Config) when is_list(Config) ->
     %% depend on that pids are ordered in creation order which is no
     %% longer the case when many processes have been started before
     Node = start_slave(),
-    ok = rpc:call(Node, ?MODULE, test_delete_table_while_size_snapshot_helper, []),
+    [ok = rpc:call(Node,
+                   ?MODULE,
+                   test_delete_table_while_size_snapshot_helper,
+                   [TableType])
+     || TableType <- [set, ordered_set]],
     test_server:stop_node(Node),
     ok.
 
-test_delete_table_while_size_snapshot_helper()->
+test_delete_table_while_size_snapshot_helper(TableType) ->
     TopParent = self(),
     repeat_par(
       fun() ->
-              Table = ets:new(t, [public, ordered_set,
+              Table = ets:new(t, [public, TableType,
                                   {write_concurrency, true}]),
              Parent = self(),
              NrOfSizeProcs = 100,
@@ -4771,7 +4792,7 @@
                || _ <- lists:seq(1, NrOfSizeProcs)],
              timer:sleep(1),
              ets:delete(Table),
-             [receive
+             [receive
                   table_gone ->  ok;
                   Problem -> TopParent ! Problem
               end || _ <- Pids]
@@ -7676,6 +7697,7 @@ my_tab_to_list(Ts,Key, Acc) ->
 
 wait_for_memory_deallocations() ->
     try
+        erts_debug:set_internal_state(wait, thread_progress),
        erts_debug:set_internal_state(wait, deallocations)
     catch
         error:undef ->
@@ -7687,20 +7709,52 @@ etsmem() ->
     % The following is done twice to avoid an inconsistent memory
     % "snapshot" (see verify_etsmem/2).
     lists:foldl(
-      fun(_,_) ->
+      fun(AttemptNr, PrevEtsMem) ->
+              AllTabsExceptions = [logger, code],
+              %% The logger table is excluded from the AllTabs list
+              %% below because it uses decentralized counters to keep
+              %% track of the size and the memory counters. This cause
+              %% ets:info(T,size) and ets:info(T,memory) to trigger
+              %% allocations and frees that may change the amount of
+              %% memory that is allocated for ETS.
+              %%
+              %% The code table is excluded from the list below
+              %% because the amount of memory allocated for it may
+              %% change if the tested code loads a new module.
+              AllTabs =
+                  lists:sort(
+                    [begin
+                         case ets:info(T,write_concurrency) of
+                             true ->
+                                 ct:fail("Background ETS table (~p) that "
+                                         "use decentralized counters (Add exception?)",
+                                         [ets:info(T,name)]);
+                             _ -> ok
+                         end,
+                         {T,
+                          ets:info(T,name),
+                          ets:info(T,size),
+                          ets:info(T,memory),
+                          ets:info(T,type)}
+                     end
+                     || T <- ets:all(),
+                        not lists:member(ets:info(T, name), AllTabsExceptions)]),
              wait_for_memory_deallocations(),
-
-              AllTabs = lists:map(fun(T) -> {T,ets:info(T,name),ets:info(T,size),
-                                             ets:info(T,memory),ets:info(T,type)}
-                                  end, ets:all()),
-
              EtsAllocSize = erts_debug:alloc_blocks_size(ets_alloc),
              ErlangMemoryEts = try erlang:memory(ets) catch error:notsup -> notsup end,
-
-              Mem = {ErlangMemoryEts, EtsAllocSize},
-              {Mem, AllTabs}
+              FlxCtrMemUsage = erts_debug:get_internal_state(flxctr_memory_usage),
+              Mem = {ErlangMemoryEts, EtsAllocSize, FlxCtrMemUsage},
+              EtsMem = {Mem, AllTabs},
+              case PrevEtsMem of
+                  first -> ok;
+                  _ when PrevEtsMem =:= EtsMem -> ok;
+                  _ ->
+                      io:format("etsmem(): Change in attempt ~p~n~nbefore:~n~p~n~nafter:~n~p~n~n",
+                                [AttemptNr, PrevEtsMem, EtsMem])
+              end,
+              EtsMem
      end,
-      not_used,
+      first,
      lists:seq(1,2)).
 
 verify_etsmem(MI) ->