summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLukas Larsson <lukas@erlang.org>2019-11-29 10:04:56 +0100
committerLukas Larsson <lukas@erlang.org>2019-11-29 10:04:56 +0100
commit24327177f0e84d9b0d6a8c67c89643199d609d1a (patch)
tree76f1601bf8624f6ad5b7bfb356c2747523d51dca
parent87ac04ace452d857d0980949e588b11cc028bf04 (diff)
parente941b8adac09c2e88d0e15ddb759e6e7d4dc636d (diff)
downloaderlang-24327177f0e84d9b0d6a8c67c89643199d609d1a.tar.gz
Merge branch 'lukas/erts/ets-smp-1-optimization/OTP-16315/OTP-16316' into maint
* lukas/erts/ets-smp-1-optimization/OTP-16315/OTP-16316: erts: Optimize dec_term for atoms when used by ets compressed erts: Optimize meta table lock to not be taken when +S 1 ets: Remove table locking when using smp 1
-rw-r--r--erts/emulator/beam/erl_db.c197
-rw-r--r--erts/emulator/beam/erl_db_hash.c6
-rw-r--r--erts/emulator/beam/erl_db_util.h3
-rw-r--r--erts/emulator/beam/external.c39
-rw-r--r--lib/stdlib/test/ets_SUITE.erl36
5 files changed, 172 insertions, 109 deletions
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 285c23ea01..b4a97b42c8 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -342,6 +342,9 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
{
unsigned bix = atom_val(name) & meta_name_tab_mask;
struct meta_name_tab_entry* bucket = &meta_name_tab[bix];
+ /* Only non-dirty schedulers are allowed to access the metatable.
+ The smp 1 optimizations for ETS depend on that. */
+ ASSERT(erts_get_scheduler_data() && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
*lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck;
return bucket;
}
@@ -410,10 +413,13 @@ free_dbtable(void *vtb)
sizeof(DbTable) == erts_flxctr_read_approx(&tb->common.counters,
ERTS_DB_TABLE_MEM_COUNTER_ID));
- erts_rwmtx_destroy(&tb->common.rwlock);
- erts_mtx_destroy(&tb->common.fixlock);
ASSERT(is_immed(tb->common.heir_data));
+ if (!DB_LOCK_FREE(tb)) {
+ erts_rwmtx_destroy(&tb->common.rwlock);
+ erts_mtx_destroy(&tb->common.fixlock);
+ }
+
if (tb->common.btid)
erts_bin_release(tb->common.btid);
@@ -592,74 +598,79 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
{
erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
if (use_frequent_read_lock)
- rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
- rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
- tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
- erts_mtx_init(&tb->common.fixlock, "db_tab_fix",
- tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
+ if (!DB_LOCK_FREE(tb)) {
+ erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ }
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
+ ASSERT(!DB_LOCK_FREE(tb) || tb->common.is_thread_safe);
}
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
+ if (DB_LOCK_FREE(tb))
+ return;
if (tb->common.type & DB_FINE_LOCKED) {
- if (kind == LCK_WRITE) {
- erts_rwmtx_rwlock(&tb->common.rwlock);
- tb->common.is_thread_safe = 1;
- } else {
- erts_rwmtx_rlock(&tb->common.rwlock);
- ASSERT(!tb->common.is_thread_safe);
- }
+ if (kind == LCK_WRITE) {
+ erts_rwmtx_rwlock(&tb->common.rwlock);
+ tb->common.is_thread_safe = 1;
+ } else {
+ erts_rwmtx_rlock(&tb->common.rwlock);
+ ASSERT(!tb->common.is_thread_safe);
+ }
}
else
{
- switch (kind) {
- case LCK_WRITE:
- case LCK_WRITE_REC:
- erts_rwmtx_rwlock(&tb->common.rwlock);
- break;
- default:
- erts_rwmtx_rlock(&tb->common.rwlock);
- }
- ASSERT(tb->common.is_thread_safe);
+ switch (kind) {
+ case LCK_WRITE:
+ case LCK_WRITE_REC:
+ erts_rwmtx_rwlock(&tb->common.rwlock);
+ break;
+ default:
+ erts_rwmtx_rlock(&tb->common.rwlock);
+ }
+ ASSERT(tb->common.is_thread_safe);
}
}
static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
{
- /*
- * In NON-SMP case tb may refer to an already deallocated
- * DbTable structure. That is, ONLY the SMP case is allowed
- * to follow the tb pointer!
- */
+ if (DB_LOCK_FREE(tb))
+ return;
if (tb->common.type & DB_FINE_LOCKED) {
- if (kind == LCK_WRITE) {
- ASSERT(tb->common.is_thread_safe);
- tb->common.is_thread_safe = 0;
- erts_rwmtx_rwunlock(&tb->common.rwlock);
- }
- else {
- ASSERT(!tb->common.is_thread_safe);
- erts_rwmtx_runlock(&tb->common.rwlock);
- }
+ if (kind == LCK_WRITE) {
+ ASSERT(tb->common.is_thread_safe);
+ tb->common.is_thread_safe = 0;
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ }
+ else {
+ ASSERT(!tb->common.is_thread_safe);
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ }
}
else {
- ASSERT(tb->common.is_thread_safe);
- switch (kind) {
- case LCK_WRITE:
- case LCK_WRITE_REC:
- erts_rwmtx_rwunlock(&tb->common.rwlock);
- break;
- default:
- erts_rwmtx_runlock(&tb->common.rwlock);
- }
+ ASSERT(tb->common.is_thread_safe);
+ switch (kind) {
+ case LCK_WRITE:
+ case LCK_WRITE_REC:
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ break;
+ default:
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ }
}
}
static ERTS_INLINE int db_is_exclusive(DbTable* tb, db_lock_kind_t kind)
{
+ if (DB_LOCK_FREE(tb))
+ return 1;
+
return kind != LCK_READ && tb->common.is_thread_safe;
}
@@ -697,20 +708,23 @@ DbTable* db_get_table_aux(Process *p,
DbTable *tb;
/*
- * IMPORTANT: Only scheduler threads are allowed
- * to access tables. Memory management
- * depend on it.
+ * IMPORTANT: Only non-dirty scheduler threads are allowed
+ * to access tables. Memory management depends on it.
*/
- ASSERT(erts_get_scheduler_data());
+ ASSERT(erts_get_scheduler_data() && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+
+ if (META_DB_LOCK_FREE())
+ meta_already_locked = 1;
if (is_atom(id)) {
erts_rwmtx_t *mtl;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
erts_rwmtx_rlock(mtl);
- else{
+ else {
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
- || erts_lc_rwmtx_is_rwlocked(mtl));
+ || erts_lc_rwmtx_is_rwlocked(mtl)
+ || META_DB_LOCK_FREE());
}
tb = NULL;
if (bucket->pu.tb != NULL) {
@@ -772,6 +786,10 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
+
+ if (META_DB_LOCK_FREE())
+ have_lock = 1;
+
if (!have_lock)
erts_rwmtx_rwlock(rwlock);
@@ -833,13 +851,17 @@ static int remove_named_tab(DbTable *tb, int have_lock)
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
ASSERT(is_table_named(tb));
+
+ if (META_DB_LOCK_FREE())
+ have_lock = 1;
+
if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
erts_rwmtx_rwlock(rwlock);
db_lock(tb, LCK_WRITE);
}
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock) || META_DB_LOCK_FREE());
if (bucket->pu.tb == NULL) {
goto done;
@@ -902,9 +924,9 @@ done:
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
erts_refc_inc(&tb->common.fix_count, 1);
-}
+}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
-{
+{
if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
@@ -1661,13 +1683,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
if (tp[1] == am_keypos
&& is_small(tp[2]) && (signed_val(tp[2]) > 0)) {
keypos = signed_val(tp[2]);
- }
+ }
else if (tp[1] == am_write_concurrency) {
- if (tp[2] == am_true) {
- is_fine_locked = 1;
- } else if (tp[2] == am_false) {
+ if (tp[2] == am_true) {
+ is_fine_locked = 1;
+ } else if (tp[2] == am_false) {
+ is_fine_locked = 0;
+ } else break;
+ if (DB_LOCK_FREE(NULL))
is_fine_locked = 0;
- } else break;
}
else if (tp[1] == am_read_concurrency) {
if (tp[2] == am_true) {
@@ -2201,7 +2225,7 @@ static void delete_all_objects_continue(Process* p, DbTable* tb)
SWord initial_reds = ERTS_BIF_REDS_LEFT(p);
SWord reds = initial_reds;
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock) || DB_LOCK_FREE(tb));
if ((tb->common.status & (DB_DELETE|DB_BUSY)) != DB_BUSY)
return;
@@ -3848,7 +3872,10 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
db_lock(tb, LCK_WRITE_REC);
if (!(tb->common.status & DB_DELETE)) {
erts_aint_t diff;
- erts_mtx_lock(&tb->common.fixlock);
+ int use_locks = !DB_LOCK_FREE(tb);
+
+ if (use_locks)
+ erts_mtx_lock(&tb->common.fixlock);
ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
@@ -3858,7 +3885,9 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
+
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
work += db_unfix_table_hash(&(tb->hash));
}
@@ -4007,8 +4036,11 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks, void **yield_stat
static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
+ int use_locks = !DB_LOCK_FREE(tb);
+
+ if (use_locks)
+ erts_mtx_lock(&tb->common.fixlock);
- erts_mtx_lock(&tb->common.fixlock);
erts_refc_inc(&tb->common.fix_count,1);
fix = tb->common.fixing_procs;
if (fix == NULL) {
@@ -4021,8 +4053,8 @@ static void fix_table_locked(Process* p, DbTable* tb)
if (fix) {
ASSERT(fixed_tabs_find(NULL, fix));
++(fix->counter);
-
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
return;
}
}
@@ -4035,7 +4067,9 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix->counter = 1;
fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
+
p->flags |= F_USING_DB;
fixed_tabs_insert(p, fix);
@@ -4047,8 +4081,11 @@ static void unfix_table_locked(Process* p, DbTable* tb,
db_lock_kind_t* kind_p)
{
DbFixation* fix;
+ int use_locks = !DB_LOCK_FREE(tb);
+
+ if (use_locks)
+ erts_mtx_lock(&tb->common.fixlock);
- erts_mtx_lock(&tb->common.fixlock);
fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
if (fix) {
@@ -4057,7 +4094,8 @@ static void unfix_table_locked(Process* p, DbTable* tb,
ASSERT(fix->counter >= 0);
if (fix->counter == 0) {
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
fixed_tabs_delete(p, fix);
erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
@@ -4068,15 +4106,18 @@ static void unfix_table_locked(Process* p, DbTable* tb,
goto unlocked;
}
}
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
&& erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
- erts_rwmtx_runlock(&tb->common.rwlock);
- erts_rwmtx_rwlock(&tb->common.rwlock);
+ if (use_locks) {
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
+ }
*kind_p = LCK_WRITE;
if (tb->common.status & (DB_DELETE|DB_BUSY))
return;
@@ -4148,7 +4189,7 @@ static SWord free_fixations_locked(Process* p, DbTable *tb)
{
struct free_fixations_ctx ctx;
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock) || DB_LOCK_FREE(tb));
ctx.p = p;
ctx.tb = tb;
@@ -4358,7 +4399,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
= ERTS_IS_ATOM_STR("safe_fixed_monotonic_time",
What))
|| ERTS_IS_ATOM_STR("safe_fixed", What)) {
- erts_mtx_lock(&tb->common.fixlock);
+ if (!DB_LOCK_FREE(tb))
+ erts_mtx_lock(&tb->common.fixlock);
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
@@ -4400,7 +4442,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else {
ret = am_false;
}
- erts_mtx_unlock(&tb->common.fixlock);
+ if (!DB_LOCK_FREE(tb))
+ erts_mtx_unlock(&tb->common.fixlock);
} else if (ERTS_IS_ATOM_STR("stats",What)) {
if (IS_HASH_TABLE(tb->common.status)) {
FloatDef f;
@@ -4607,6 +4650,8 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
#ifdef ERTS_ENABLE_LOCK_COUNT
void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
+ if (DB_LOCK_FREE(tb))
+ return;
if(enable) {
erts_lcnt_install_new_lock_info(&tb->common.rwlock.lcnt, "db_tab",
tb->common.the_name, ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 5937bd64ec..17d20b2f77 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -271,7 +271,7 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck)
# define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd))
# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck))
-# define IS_TAB_WLOCKED(tb) erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
+# define IS_TAB_WLOCKED(tb) (DB_LOCK_FREE(tb) || erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock))
#else
# define IS_HASH_RLOCKED(tb,hval) (1)
# define IS_HASH_WLOCKED(tb,hval) (1)
@@ -628,7 +628,7 @@ SWord db_unfix_table_hash(DbTableHash *tb)
FixedDeletion* fixdel;
SWord work = 0;
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb)
|| (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock)
&& !tb->common.is_thread_safe));
restart:
@@ -2814,7 +2814,7 @@ begin_resizing(DbTableHash* tb)
if (DB_USING_FINE_LOCKING(tb))
return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
else
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
return 1;
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index f038dba89a..7846a5c98a 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -344,6 +344,9 @@ typedef struct db_table_common {
#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
+#define META_DB_LOCK_FREE() (erts_no_schedulers == 1)
+#define DB_LOCK_FREE(T) META_DB_LOCK_FREE()
+
/*
* tplp is an untagged pointer to a tuple we know is large enough
* and dth is a pointer to a DbTableHash.
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 39bbf62eae..12eda60527 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -106,7 +106,7 @@ static int is_external_string(Eterm obj, Uint* lenp);
static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
struct B2TContext_t;
-static byte* dec_term(ErtsDistExternal*, ErtsHeapFactory*, byte*, Eterm*, struct B2TContext_t*);
+static byte* dec_term(ErtsDistExternal*, ErtsHeapFactory*, byte*, Eterm*, struct B2TContext_t*, int);
static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*);
static byte* dec_pid(ErtsDistExternal *, ErtsHeapFactory*, byte*, Eterm*, byte tag);
static Sint decoded_size(byte *ep, byte* endp, int internal_tags, struct B2TContext_t*);
@@ -1162,7 +1162,7 @@ erts_decode_dist_ext(ErtsHeapFactory* factory,
goto error;
ep++;
}
- ep = dec_term(edep, factory, ep, &obj, NULL);
+ ep = dec_term(edep, factory, ep, &obj, NULL, 0);
if (!ep)
goto error;
@@ -1196,7 +1196,7 @@ Eterm erts_decode_ext(ErtsHeapFactory* factory, byte **ext, Uint32 flags)
} else {
edep = NULL;
}
- ep = dec_term(edep, factory, ep, &obj, NULL);
+ ep = dec_term(edep, factory, ep, &obj, NULL, 0);
if (!ep) {
return THE_NON_VALUE;
}
@@ -1207,7 +1207,7 @@ Eterm erts_decode_ext(ErtsHeapFactory* factory, byte **ext, Uint32 flags)
Eterm erts_decode_ext_ets(ErtsHeapFactory* factory, byte *ext)
{
Eterm obj;
- ext = dec_term(NULL, factory, ext, &obj, NULL);
+ ext = dec_term(NULL, factory, ext, &obj, NULL, 1);
ASSERT(ext);
return obj;
}
@@ -1574,7 +1574,7 @@ binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state,
{
Eterm res;
- if (!dec_term(edep, factory, state->extp, &res, NULL))
+ if (!dec_term(edep, factory, state->extp, &res, NULL, 0))
res = THE_NON_VALUE;
if (state->exttmp) {
state->exttmp = 0;
@@ -1796,7 +1796,7 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Eterm bin, B2TContext *ctx)
ErtsDistExternal fakedep;
fakedep.flags = ctx->flags;
fakedep.data = NULL;
- dec_term(&fakedep, NULL, NULL, NULL, ctx);
+ dec_term(&fakedep, NULL, NULL, NULL, ctx, 0);
break;
}
case B2TDecodeFail:
@@ -3238,7 +3238,8 @@ dec_term(ErtsDistExternal *edep,
ErtsHeapFactory* factory,
byte* ep,
Eterm* objp,
- B2TContext* ctx)
+ B2TContext* ctx,
+ int ets_decode)
{
#define PSTACK_TYPE struct dec_term_hamt
PSTACK_DECLARE(hamt_array, 5);
@@ -3912,7 +3913,7 @@ dec_term_atom_common:
goto error;
}
factory->hp = hp;
- ep = dec_term(edep, factory, ep, &temp, NULL);
+ ep = dec_term(edep, factory, ep, &temp, NULL, 0);
hp = factory->hp;
if (ep == NULL) {
goto error;
@@ -4022,7 +4023,7 @@ dec_term_atom_common:
}
factory->hp = hp;
/* Index */
- if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL, 0)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -4031,7 +4032,7 @@ dec_term_atom_common:
old_index = unsigned_val(temp);
/* Uniq */
- if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL, 0)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -4098,7 +4099,7 @@ dec_term_atom_common:
}
/* Index */
- if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL, 0)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -4107,7 +4108,7 @@ dec_term_atom_common:
old_index = unsigned_val(temp);
/* Uniq */
- if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) {
+ if ((ep = dec_term(edep, factory, ep, &temp, NULL, 0)) == NULL) {
goto error;
}
if (!is_small(temp)) {
@@ -4136,7 +4137,10 @@ dec_term_atom_common:
case ATOM_INTERNAL_REF2:
n = get_int16(ep);
ep += 2;
- if (n >= atom_table_size()) {
+ /* If this is an ets_decode we know that
+ the atom is valid, so we can skip the
+ validation check */
+ if (!ets_decode && n >= atom_table_size()) {
goto error;
}
*objp = make_atom(n);
@@ -4144,7 +4148,10 @@ dec_term_atom_common:
case ATOM_INTERNAL_REF3:
n = get_int24(ep);
ep += 3;
- if (n >= atom_table_size()) {
+ /* If this is an ets_decode we know that
+ the atom is valid, so we can skip the
+ validation check */
+ if (!ets_decode && n >= atom_table_size()) {
goto error;
}
*objp = make_atom(n);
@@ -5026,7 +5033,7 @@ Sint transcode_dist_obuf(ErtsDistOutputBuf* ob,
erts_factory_tmp_init(&ctx->ctl_factory, ctx->ctl_heap, hsz, ERTS_ALC_T_DIST_TRANSCODE);
ctx->msg_heap = NULL;
- decp = dec_term(NULL, &ctx->ctl_factory, ob->extp, &ctx->ctl_term, NULL);
+ decp = dec_term(NULL, &ctx->ctl_factory, ob->extp, &ctx->ctl_term, NULL, 0);
if (have_msg) {
ASSERT(decp == ob->msg_start); (void)decp;
ctx->b2t.u.sc.ep = NULL;
@@ -5086,7 +5093,7 @@ Sint transcode_dist_obuf(ErtsDistOutputBuf* ob,
case TRANSCODE_DEC_MSG:
if (ctx->b2t.reds <= 0)
ctx->b2t.reds = 1;
- decp = dec_term(NULL, NULL, NULL, NULL, &ctx->b2t);
+ decp = dec_term(NULL, NULL, NULL, NULL, &ctx->b2t, 0);
if (ctx->b2t.state < B2TDone) {
return -1;
}
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index 4fcc0e65ea..b7fe664f12 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -2559,10 +2559,10 @@ write_concurrency(Config) when is_list(Config) ->
NoHashMem = ets:info(No8,memory),
NoHashMem = ets:info(No9,memory),
- true = YesMem > NoHashMem,
- true = YesMem > NoTreeMem,
+ true = YesMem > NoHashMem orelse erlang:system_info(schedulers) == 1,
+ true = YesMem > NoTreeMem orelse erlang:system_info(schedulers) == 1,
true = YesMem > YesTreeMem,
- true = YesTreeMem < NoTreeMem,
+ true = YesTreeMem < NoTreeMem orelse erlang:system_info(schedulers) == 1,
{'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency,foo}])),
{'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency}])),
@@ -4612,10 +4612,18 @@ test_table_counter_concurrency(WhatToTest) ->
ok.
test_table_size_concurrency(Config) when is_list(Config) ->
- test_table_counter_concurrency(size).
+ case erlang:system_info(schedulers) of
+ 1 -> {skip,"Only valid on smp > 1 systems"};
+ _ ->
+ test_table_counter_concurrency(size)
+ end.
test_table_memory_concurrency(Config) when is_list(Config) ->
- test_table_counter_concurrency(memory).
+ case erlang:system_info(schedulers) of
+ 1 -> {skip,"Only valid on smp > 1 systems"};
+ _ ->
+ test_table_counter_concurrency(memory)
+ end.
%% Tests that calling the ets:delete operation on a table T with
%% decentralized counters works while ets:info(T, size) operations are
@@ -4756,7 +4764,7 @@ tab2file_do(FName, Opts, TableType) ->
true = ets:info(Tab2, compressed),
Smp = erlang:system_info(smp_support),
Smp = ets:info(Tab2, read_concurrency),
- Smp = ets:info(Tab2, write_concurrency),
+ Smp = ets:info(Tab2, write_concurrency) orelse erlang:system_info(schedulers) == 1,
true = ets:delete(Tab2),
verify_etsmem(EtsMem).
@@ -8159,15 +8167,15 @@ ets_new(Name, Opts, KeyRange) ->
ets_new(Name, Opts, KeyRange, fun id/1).
ets_new(Name, Opts0, KeyRange, KeyFun) ->
- {CATree, Stimulate, RevOpts} =
- lists:foldl(fun(cat_ord_set, {false, false, Lacc}) ->
- {true, false, [ordered_set | Lacc]};
- (stim_cat_ord_set, {false, false, Lacc}) ->
- {true, true, [ordered_set | Lacc]};
- (Other, {CAT, STIM, Lacc}) ->
- {CAT, STIM, [Other | Lacc]}
+ {_Smp, CATree, Stimulate, RevOpts} =
+ lists:foldl(fun(cat_ord_set, {Smp, false, false, Lacc}) ->
+ {Smp, Smp, false, [ordered_set | Lacc]};
+ (stim_cat_ord_set, {Smp, false, false, Lacc}) ->
+ {Smp, Smp, Smp, [ordered_set | Lacc]};
+ (Other, {Smp, CAT, STIM, Lacc}) ->
+ {Smp, CAT, STIM, [Other | Lacc]}
end,
- {false, false, []},
+ {erlang:system_info(schedulers) > 1,false, false, []},
Opts0),
Opts = lists:reverse(RevOpts),
EtsNewHelper =