author    Kjell Winblad <kjellwinblad@gmail.com>  2021-09-07 15:49:04 +0200
committer Kjell Winblad <kjellwinblad@gmail.com>  2021-09-17 09:33:19 +0200
commit    68eb4cfa92ddf7d2fe9bc60532b487594dcc737c (patch)
tree      1e72b7e2482bc4bac466d23ecf32eb9f16eb044a
parent    0bad25713b0bc4a875e9ef7d9b1abcb6a2f75061 (diff)
Make it possible to set the number of locks for hash ETS tables up to 32768
This commit makes it possible for users to explicitly set the number of locks that are used for tables of type set, bag, and duplicate_bag by configuring a table with the {write_concurrency, N} option, where N is an integer in the range [2, 32768].
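
For example, a minimal usage sketch mirroring the test cases added in this commit (table names are illustrative):

    %% Request 1024 locks for the hash table backing a set:
    T1 = ets:new(t1, [public, set, {write_concurrency, 1024}]),
    1024 = ets:info(T1, write_concurrency),
    %% Private tables never use fine-grained locking, so the
    %% requested granularity is ignored:
    T2 = ets:new(t2, [private, set, {write_concurrency, 1024}]),
    false = ets:info(T2, write_concurrency).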
 erts/emulator/beam/erl_db.c      | 37
 erts/emulator/beam/erl_db_hash.c | 96
 erts/emulator/beam/erl_db_hash.h | 11
 erts/emulator/beam/erl_db_util.h |  1
 lib/stdlib/doc/src/ets.xml       | 31
 lib/stdlib/src/ets.erl           |  3
 lib/stdlib/test/ets_SUITE.erl    | 33
 7 files changed, 163 insertions(+), 49 deletions(-)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 06cf04f9ee..061ccd5038 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -2254,8 +2254,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Sint keypos;
int is_named, is_compressed;
int is_fine_locked, frequent_read;
+ int no_locks;
int is_decentralized_counters;
int is_decentralized_counters_option;
+ int is_explicit_lock_granularity;
int cret;
DbTableMethod* meth;
@@ -2276,6 +2278,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
heir = am_none;
heir_data = (UWord) am_undefined;
is_compressed = erts_ets_always_compress;
+ no_locks = -1;
+ is_explicit_lock_granularity = 0;
list = BIF_ARG_2;
while(is_list(list)) {
@@ -2301,10 +2305,22 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = signed_val(tp[2]);
}
else if (tp[1] == am_write_concurrency) {
- if (tp[2] == am_true) {
+ Sint no_locks_param;
+ if (is_integer(tp[2]) &&
+ term_to_Sint(tp[2], &no_locks_param) &&
+ no_locks_param >= 2 &&
+ no_locks_param <= 32768) {
is_fine_locked = 1;
+ is_explicit_lock_granularity = 1;
+ no_locks = no_locks_param;
+ } else if (tp[2] == am_true) {
+ is_fine_locked = 1;
+ is_explicit_lock_granularity = 0;
+ no_locks = -1;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
+ is_explicit_lock_granularity = 0;
+ no_locks = -1;
} else break;
if (DB_LOCK_FREE(NULL))
is_fine_locked = 0;
@@ -2373,7 +2389,12 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
meth = &db_hash;
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
- }
+ if (is_explicit_lock_granularity) {
+ status |= DB_EXPLICIT_LOCK_GRANULARITY;
+ }
+ } else {
+ no_locks = -1;
+ }
}
else if (IS_TREE_TABLE(status)) {
meth = &db_tree;
@@ -2423,6 +2444,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.dbg_force_trap = erts_ets_dbg_force_trap;
#endif
+ if (IS_HASH_TABLE(status)) {
+ DbTableHash* hash_db = (DbTableHash*) tb;
+ hash_db->nlocks = no_locks;
+ }
cret = meth->db_create(BIF_P, tb);
ASSERT(cret == DB_ERROR_NONE); (void)cret;
@@ -5024,7 +5049,13 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
else if (tb->common.status & DB_PUBLIC)
ret = am_public;
} else if (What == am_write_concurrency) {
- ret = tb->common.status & DB_FINE_LOCKED ? am_true : am_false;
+ if ((tb->common.status & DB_FINE_LOCKED) &&
+ (tb->common.status & (DB_SET | DB_BAG | DB_DUPLICATE_BAG)) &&
+ (tb->common.status & DB_EXPLICIT_LOCK_GRANULARITY)) {
+ ret = erts_make_integer(tb->hash.nlocks, p);
+ } else {
+ ret = tb->common.status & DB_FINE_LOCKED ? am_true : am_false;
+ }
} else if (What == am_read_concurrency) {
ret = tb->common.status & DB_FREQ_READ ? am_true : am_false;
} else if (What == am_name) {
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 6eb7bde1b4..e21c833d78 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -92,7 +92,7 @@
#define NITEMS_ESTIMATE(DB, LCK_CTR, HASH) \
(IS_DECENTRALIZED_CTRS(DB) ? \
- (DB_HASH_LOCK_CNT * \
+ (((DB)->nlocks) * \
(LCK_CTR != NULL ? \
NITEMS_ESTIMATE_FROM_LCK_CTR(LCK_CTR) : \
NITEMS_ESTIMATE_FROM_LCK_CTR(GET_LOCK_AND_CTR(DB, HASH)))) : \
@@ -264,9 +264,10 @@ static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
make_internal_hash(term, 0)) & MAX_HASH_MASK)
-# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
-# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck_ctr.lck)
-# define GET_LOCK_AND_CTR(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck_ctr)
+# define GET_LOCK_MASK(NO_LOCKS) ((NO_LOCKS)-1)
+
+# define GET_LOCK(tb,hval) (&(tb)->locks[(hval) & GET_LOCK_MASK(tb->nlocks)].u.lck_ctr.lck)
+# define GET_LOCK_AND_CTR(tb,hval) (&(tb)->locks[(hval) & GET_LOCK_MASK(tb->nlocks)].u.lck_ctr)
# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
/* Fine grained read lock */
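
The macros above pick a lock slot by masking the bucket hash with nlocks - 1, which is why nlocks must be a power of two. The same arithmetic in a minimal Erlang sketch (the function name is illustrative, not part of the patch):

    %% Lock slot selection as done by GET_LOCK; only valid when
    %% NLocks is a power of two.
    lock_index(HashValue, NLocks) ->
        HashValue band (NLocks - 1).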
@@ -349,10 +350,10 @@ static ERTS_INLINE void WUNLOCK_HASH_LCK_CTR(DbTableHashLockAndCounter* lck_ctr)
static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
erts_rwmtx_t** lck_ptr)
{
- ix += DB_HASH_LOCK_CNT;
+ ix += tb->nlocks;
if (ix < NACTIVE(tb)) return ix;
RUNLOCK_HASH(*lck_ptr);
- ix = (ix + 1) & DB_HASH_LOCK_MASK;
+ ix = (ix + 1) & GET_LOCK_MASK(tb->nlocks);
if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix);
return ix;
}
@@ -360,10 +361,10 @@ static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
erts_rwmtx_t** lck_ptr)
{
- ix += DB_HASH_LOCK_CNT;
+ ix += tb->nlocks;
if (ix < NACTIVE(tb)) return ix;
WUNLOCK_HASH(*lck_ptr);
- ix = (ix + 1) & DB_HASH_LOCK_MASK;
+ ix = (ix + 1) & GET_LOCK_MASK(tb->nlocks);
if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix);
return ix;
}
@@ -439,7 +440,7 @@ typedef int ExtraMatchValidatorF(int keypos, Eterm match, Eterm guard, Eterm bod
** Forward decl's (static functions)
*/
static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
-static void alloc_seg(DbTableHash *tb);
+static void alloc_seg(DbTableHash *tb, int activate_new_seg);
static int free_seg(DbTableHash *tb);
static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
HashDbTerm *list);
@@ -807,25 +808,54 @@ int db_create_hash(Process *p, DbTable *tbl)
sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
erts_atomic_init_nob(&tb->is_resizing, 0);
+ if (tb->nlocks == -1 || !(tb->common.type & DB_FINE_LOCKED)) {
+ /*
+ The number of locks needs to be set even if fine-grained
+ locking is not used, as this variable is used when iterating
+ over the table.
+ */
+ tb->nlocks = DB_HASH_LOCK_CNT;
+ }
+
if (tb->common.type & DB_FINE_LOCKED) {
erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
int i;
+ /*
+ nlocks needs to be a power of two, so we round down to the
+ nearest power of two
+ */
+ tb->nlocks = 1 << (erts_fit_in_bits_int64(tb->nlocks)-1);
+ /*
+ The table needs to be at least as big as the number of locks,
+ so we expand until this property is satisfied.
+ */
+ while (tb->nlocks > tb->nslots) {
+ alloc_seg(tb, 1);
+ }
+
if (tb->common.type & DB_FREQ_READ)
rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- tb->locks = (DbTableHashFineLocks*) erts_db_alloc(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
- (DbTable *) tb,
- sizeof(DbTableHashFineLocks));
- for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck_ctr.lck, &rwmtx_opt,
+ tb->locks = (DbTableHashFineLockSlot*) erts_db_alloc(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
+ (DbTable *) tb,
+ sizeof(DbTableHashFineLockSlot) * tb->nlocks);
+ for (i=0; i<tb->nlocks; ++i) {
+ erts_rwmtx_init_opt(&tb->locks[i].u.lck_ctr.lck, &rwmtx_opt,
"db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
- tb->locks->lck_vec[i].lck_ctr.nitems = 0;
+ tb->locks[i].u.lck_ctr.nitems = 0;
}
- /* This important property is needed to guarantee the two buckets
- * involved in a grow/shrink operation it protected by the same lock:
+ /*
+ * These important properties are needed to guarantee that the
+ * two buckets involved in a grow/shrink operation are protected
+ * by the same lock:
*/
- ASSERT(erts_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
+ ASSERT((erts_atomic_read_nob(&tb->szm) + 1) % tb->nlocks == 0);
+ ASSERT(tb->nlocks <= erts_atomic_read_nob(&tb->nactive));
+ ASSERT(erts_atomic_read_nob(&tb->nactive) <= tb->nslots);
+ ASSERT(tb->nslots <= (erts_atomic_read_nob(&tb->szm) + 1));
+ ASSERT((tb->nlocks % 2) == 0);
+ ASSERT((erts_atomic_read_nob(&tb->szm) + 1) % 2 == 0);
}
else { /* coarse locking */
tb->locks = NULL;
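
The power-of-two rounding that db_create_hash performs with erts_fit_in_bits_int64 can be sketched with plain integer arithmetic in Erlang (an illustrative helper, not part of the patch):

    %% Round N down to the nearest power of two, as db_create_hash
    %% does with the requested lock count.
    floor_pow2(N) when is_integer(N), N >= 1 ->
        floor_pow2(N, 1).

    floor_pow2(N, P) when P * 2 =< N ->
        floor_pow2(N, P * 2);
    floor_pow2(_N, P) ->
        P.

For example, floor_pow2(1000) returns 512, so a table created with {write_concurrency, 1000} ends up with 512 lock slots.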
@@ -2354,8 +2384,8 @@ static Sint get_nitems_from_locks_or_counter(DbTableHash* tb)
if (IS_DECENTRALIZED_CTRS(tb)) {
int i;
Sint total = 0;
- for (i=0; i < DB_HASH_LOCK_CNT; ++i) {
- total += tb->locks->lck_vec[i].lck_ctr.nitems;
+ for (i=0; i < tb->nlocks; ++i) {
+ total += tb->locks[i].u.lck_ctr.nitems;
}
return total;
} else {
@@ -2837,11 +2867,11 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
}
if (tb->locks != NULL) {
int i;
- for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
+ for (i=0; i<tb->nlocks; ++i) {
erts_rwmtx_destroy(GET_LOCK(tb,i));
}
erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
- (void*)tb->locks, sizeof(DbTableHashFineLocks));
+ (void*)tb->locks, tb->nlocks * sizeof(DbTableHashFineLockSlot));
tb->locks = NULL;
}
ASSERT(erts_flxctr_is_snapshot_ongoing(&tb->common.counters) ||
@@ -3058,13 +3088,13 @@ static void calc_shrink_limit(DbTableHash* tb)
/* const double d = n*x / (x + n - 1) + 1; */
/* printf("Cochran_formula=%f size=%d mod_with_size=%f\n", x, n, d); */
/* } */
- const int needed_slots = 100 * DB_HASH_LOCK_CNT;
+ const int needed_slots = 100 * tb->nlocks;
if (tb->nslots < needed_slots) {
sample_size_is_enough = 0;
}
}
- if (sample_size_is_enough && tb->nslots >= (FIRST_SEGSZ + 2*EXT_SEGSZ)) {
+ if (sample_size_is_enough && tb->nslots >= MAX(tb->nlocks + EXT_SEGSZ, (FIRST_SEGSZ + 2*EXT_SEGSZ))) {
/*
* Start shrink when the sample size is big enough for
* decentralized counters if decentralized counters are used
@@ -3092,7 +3122,7 @@ static void calc_shrink_limit(DbTableHash* tb)
/* Extend table with one new segment
*/
-static void alloc_seg(DbTableHash *tb)
+static void alloc_seg(DbTableHash *tb, int activate_buckets)
{
int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots);
struct segment** segtab;
@@ -3117,6 +3147,18 @@ static void alloc_seg(DbTableHash *tb)
}
#endif
tb->nslots += EXT_SEGSZ;
+ if (activate_buckets) {
+ erts_aint_t nactive_before = erts_atomic_read_nob(&tb->nactive);
+ erts_aint_t nactive_now = nactive_before + EXT_SEGSZ;
+ erts_aint_t floor_2_mult = 1 << (erts_fit_in_bits_int64(nactive_now)-1);
+ if (floor_2_mult != nactive_now) {
+ erts_atomic_set_nob(&tb->szm, (floor_2_mult << 1) - 1);
+ } else {
+ erts_atomic_set_nob(&tb->szm, floor_2_mult - 1);
+ }
+ sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ));
+ erts_atomic_set_nob(&tb->nactive, nactive_now);
+ }
calc_shrink_limit(tb);
}
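
When alloc_seg is asked to activate the new buckets immediately, the size mask szm must grow with them; the floor_2_mult logic above amounts to the following Erlang sketch (reusing the illustrative floor_pow2 helper from earlier):

    %% New hash mask after activating NActiveNow buckets: if the
    %% count is an exact power of two the mask is one less,
    %% otherwise it is the next power of two minus one.
    new_szm(NActiveNow) ->
        case floor_pow2(NActiveNow) of
            NActiveNow -> NActiveNow - 1;
            Floor -> (Floor bsl 1) - 1
        end.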
@@ -3341,7 +3383,7 @@ static void grow(DbTableHash* tb, int nitems)
if (nactive == tb->nslots) {
/* Time to get a new segment */
ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0);
- alloc_seg(tb);
+ alloc_seg(tb, 0);
}
ASSERT(nactive < tb->nslots);
@@ -3964,7 +4006,7 @@ void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
}
- for(i = 0; i < DB_HASH_LOCK_CNT; i++) {
+ for(i = 0; i < tb->nlocks; i++) {
- erts_lcnt_ref_t *ref = &tb->locks->lck_vec[i].lck_ctr.lck.lcnt;
+ erts_lcnt_ref_t *ref = &tb->locks[i].u.lck_ctr.lck.lcnt;
if(enable) {
erts_lcnt_install_new_lock_info(ref, "db_hash_slot", tb->common.the_name,
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 830dc77114..0ec781b45d 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -58,12 +58,12 @@ typedef struct DbTableHashLockAndCounter {
erts_rwmtx_t lck;
} DbTableHashLockAndCounter;
-typedef struct db_table_hash_fine_locks {
+typedef struct db_table_hash_fine_lock_slot {
union {
DbTableHashLockAndCounter lck_ctr;
- byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))];
- }lck_vec[DB_HASH_LOCK_CNT];
-} DbTableHashFineLocks;
+ byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(DbTableHashLockAndCounter))];
+ } u;
+} DbTableHashFineLockSlot;
typedef struct db_table_hash {
DbTableCommon common;
@@ -77,13 +77,14 @@ typedef struct db_table_hash {
struct segment* first_segtab[1];
/* SMP: nslots and nsegs are protected by is_resizing or table write lock */
+ int nlocks; /* Needs to be less than or equal to nactive */
int nslots; /* Total number of slots */
int nsegs; /* Size of segment table */
/* List of slots where elements have been deleted while table was fixed */
erts_atomic_t fixdel; /* (FixedDeletion*) */
erts_atomic_t is_resizing; /* grow/shrink in progress */
- DbTableHashFineLocks* locks;
+ DbTableHashFineLockSlot* locks;
} DbTableHash;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 127c5c45de..325cedc24a 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -345,6 +345,7 @@ typedef struct db_table_common {
#define DB_FREQ_READ (1 << 10) /* read_concurrency */
#define DB_NAMED_TABLE (1 << 11)
#define DB_BUSY (1 << 12)
+#define DB_EXPLICIT_LOCK_GRANULARITY (1 << 13)
#define DB_CATREE_FORCE_SPLIT (1 << 31) /* erts_debug */
#define DB_CATREE_DEBUG_RANDOM_SPLIT_JOIN (1 << 30) /* erts_debug */
diff --git a/lib/stdlib/doc/src/ets.xml b/lib/stdlib/doc/src/ets.xml
index 8f3c5131af..e1a051ba19 100644
--- a/lib/stdlib/doc/src/ets.xml
+++ b/lib/stdlib/doc/src/ets.xml
@@ -656,9 +656,9 @@ Error: fun containing local Erlang function calls
<p>Indicates whether the table uses <c>read_concurrency</c> or
not.</p>
</item>
- <tag><c>{write_concurrency, boolean()}</c></tag>
+ <tag><c>{write_concurrency, WriteConcurrencyAlternative}</c></tag>
<item>
- <p>Indicates whether the table uses <c>write_concurrency</c>.</p>
+ <p>Indicates which <c>write_concurrency</c> option the table uses.</p>
</item>
</taglist>
<note><p>The execution time of this function is affected by
@@ -1258,7 +1258,7 @@ ets:select(Table, MatchSpec),</code>
when the owner terminates.</p>
<marker id="new_2_write_concurrency"></marker>
</item>
- <tag><c>{write_concurrency,boolean()}</c></tag>
+ <tag><c>{write_concurrency,WriteConcurrencyAlternative}</c></tag>
<item>
<p>Performance tuning. Defaults to <c>false</c>, in which case an
operation that
@@ -1269,6 +1269,29 @@ ets:select(Table, MatchSpec),</code>
(and read) by concurrent processes. This is achieved to some
degree at the expense of memory consumption and the performance
of sequential access and concurrent reading.</p>
+ <p>Users can explicitly control the synchronization
+ granularity for tables of types <c>set</c>, <c>bag</c>,
+ and <c>duplicate_bag</c> by setting the
+ <c>write_concurrency</c> option to an integer in the range
+ <c>[2, 32768]</c>. Currently, setting the
+ <c>write_concurrency</c> option to a number for
+ <c>ordered_set</c> tables has the same effect as setting
+ its value to <c>true</c>. Explicitly setting the
+ synchronization granularity is only recommended when it is
+ possible to experiment and measure which granularity gives
+ the best performance. Currently, the number is
+ automatically rounded down to the nearest power of two. A
+ high value for this setting should usually not be
+ combined with the <c>{read_concurrency, true}</c> setting,
+ as this usually leads to worse performance and high memory
+ utilization. It is also usually a bad idea to set this
+ option to a number much greater than the expected number
+ of items in the table, as that can lead to slow table
+ traversals. The effect of this setting might change in
+ future versions of Erlang/OTP. If you are unsure what
+ number to use, it is probably best to use
+ <c>{write_concurrency, true}</c> instead.</p>
<p>The <c>write_concurrency</c> option can be combined with the options
<seeerl marker="#new_2_read_concurrency">
<c>read_concurrency</c></seeerl> and
@@ -1281,7 +1304,7 @@ ets:select(Table, MatchSpec),</code>
<c>read_concurrency</c></seeerl>. The <c>decentralized_counters</c>
option is turned on by default for tables of type <c>ordered_set</c>
with the <c>write_concurrency</c> option enabled, and the
- <c>decentralized_counters</c> option is turned off by default for
+ <c>decentralized_counters</c> option is turned <em>off</em> by default for
all other table types.
For more information, see the documentation for the
<seeerl marker="#new_2_decentralized_counters">
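
As the documentation text above states, an integer given for an ordered_set table currently behaves like true; combined with the table_info change in erl_db.c, the observable behavior should be as in this sketch (an inference from this patch, not a test taken from it):

    %% The integer is accepted for ordered_set but currently has the
    %% same effect as true, so info is assumed to report true here.
    TOrd = ets:new(ord, [ordered_set, public, {write_concurrency, 512}]),
    true = ets:info(TOrd, write_concurrency).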
diff --git a/lib/stdlib/src/ets.erl b/lib/stdlib/src/ets.erl
index a4414f4232..185081f326 100644
--- a/lib/stdlib/src/ets.erl
+++ b/lib/stdlib/src/ets.erl
@@ -309,7 +309,8 @@ member(_, _) ->
| {heir, Pid :: pid(), HeirData} | {heir, none} | Tweaks,
Type :: type(),
Access :: access(),
- Tweaks :: {write_concurrency, boolean()}
+ WriteConcurrencyAlternative :: boolean() | 2..32768,
+ Tweaks :: {write_concurrency, WriteConcurrencyAlternative}
| {read_concurrency, boolean()}
| {decentralized_counters, boolean()}
| compressed,
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index f8b2be5f20..bcf1ca5221 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -4889,6 +4889,18 @@ info(Config) when is_list(Config) ->
{'EXIT',{badarg,_}} = (catch ets:info(make_ref())),
{'EXIT',{badarg,_}} = (catch ets:info(make_ref(), type)),
+ %% Test that one can set the synchronization granularity level for
+ %% tables of type set
+ T1 = ets:new(t1, [public, {write_concurrency, 1024}]),
+ 1024 = ets:info(T1, write_concurrency),
+ T2 = ets:new(t2, [public, {write_concurrency, 2048}]),
+ 2048 = ets:info(T2, write_concurrency),
+ T3 = ets:new(t3, [public, {write_concurrency, 1024}, {write_concurrency, true}]),
+ true = ets:info(T3, write_concurrency),
+ T4 = ets:new(t4, [private, {write_concurrency, 1024}]),
+ false = ets:info(T4, write_concurrency),
+ T5 = ets:new(t5, [private, {write_concurrency, true}]),
+ false = ets:info(T5, write_concurrency),
ok.
info_do(Opts) ->
@@ -4954,7 +4966,6 @@ info_do(Opts) ->
{value, {id, Tab}} = lists:keysearch(id, 1, Res),
{value, {decentralized_counters, _DecentralizedCtrs}} =
lists:keysearch(decentralized_counters, 1, Res),
-
%% Test 'binary'
[] = ?ets_info(Tab, binary, SlavePid),
BinSz = 100,
@@ -4979,7 +4990,6 @@ info_do(Opts) ->
unlink(SlavePid),
exit(SlavePid,kill),
-
true = ets:delete(Tab),
verify_etsmem(EtsMem).
@@ -9270,17 +9280,22 @@ repeat_for_opts_atom2list(ord_set_types) -> [ordered_set,stim_cat_ord_set,cat_or
repeat_for_opts_atom2list(all_types) -> [set,ordered_set,stim_cat_ord_set,cat_ord_set,bag,duplicate_bag];
repeat_for_opts_atom2list(all_non_stim_types) -> [set,ordered_set,cat_ord_set,bag,duplicate_bag];
repeat_for_opts_atom2list(all_non_stim_set_types) -> [set,ordered_set,cat_ord_set];
-repeat_for_opts_atom2list(write_concurrency) -> [{write_concurrency,false},{write_concurrency,true}];
+repeat_for_opts_atom2list(write_concurrency) -> [{write_concurrency,false},
+ {write_concurrency,true},
+ {write_concurrency,2},
+ {write_concurrency,2048}];
repeat_for_opts_atom2list(read_concurrency) -> [{read_concurrency,false},{read_concurrency,true}];
repeat_for_opts_atom2list(compressed) -> [void,compressed].
is_redundant_opts_combo(Opts) ->
- (lists:member(stim_cat_ord_set, Opts) orelse
- lists:member(cat_ord_set, Opts))
- andalso
- (lists:member({write_concurrency, false}, Opts) orelse
- lists:member(private, Opts) orelse
- lists:member(protected, Opts)).
+ ((lists:member(stim_cat_ord_set, Opts) orelse
+ lists:member(cat_ord_set, Opts))
+ andalso
+ (lists:member({write_concurrency, 2}, Opts) orelse
+ lists:member({write_concurrency, 2048}, Opts) orelse
+ lists:member({write_concurrency, false}, Opts) orelse
+ lists:member(private, Opts) orelse
+ lists:member(protected, Opts))).
%% Add fake table option with info about key range.
%% Will be consumed by ets_new and used for stim_cat_ord_set.