author     Lukas Larsson <lukas@erlang.org>  2019-10-31 17:14:05 +0100
committer  Lukas Larsson <lukas@erlang.org>  2019-11-15 11:31:13 +0100
commit     090efc1d2d21ed917caf07898c28c1bf7b884a8e (patch)
tree       bb69f761ededefad6de5c99ec1b74e51b128eb34
parent     0739760ac6ad4c9b2a92bca04a6e68a0e90528ad (diff)
ets: Remove table locking when using smp 1
If we only have one normal scheduler we know that only one thread can access an ETS table at a time, so we do not need to take any table locks at all.

This approach reads and checks the global variable erts_no_schedulers each time; it could be optimized to use the table's flags field instead, though I'm not sure how much that would help.

With this change, doing ets:lookup/2 on a small value is about twice as fast.
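For readers skimming the patch, the whole change reduces to the pattern sketched below: a minimal, standalone C illustration of lock elision under a single-scheduler assumption, not the emulator code itself. The names table_t, n_schedulers, LOCK_FREE, table_rlock and table_runlock are made up for this example; the real patch keys off the global erts_no_schedulers via DB_LOCK_FREE(tb).

#include <pthread.h>

/* Mirrors erts_no_schedulers: set once at startup, read-only afterwards. */
static int n_schedulers = 1;

typedef struct {
    pthread_rwlock_t rwlock;
} table_t;

/* With a single scheduler only one thread can touch a table at a time,
 * so every lock operation is pure overhead and can be skipped. */
#define LOCK_FREE(tb) (n_schedulers == 1)

static void table_rlock(table_t *tb)
{
    if (LOCK_FREE(tb))
        return;                     /* elided: nothing can race with us */
    pthread_rwlock_rdlock(&tb->rwlock);
}

static void table_runlock(table_t *tb)
{
    if (LOCK_FREE(tb))
        return;
    pthread_rwlock_unlock(&tb->rwlock);
}

int main(void)
{
    table_t tab;
    /* The guard must also cover init/destroy (as db_init_lock and
     * free_dbtable do in the diff below): a lock that was never
     * initialized must never be locked or destroyed. */
    if (!LOCK_FREE(&tab))
        pthread_rwlock_init(&tab.rwlock, NULL);
    table_rlock(&tab);
    /* ... read from the table ... */
    table_runlock(&tab);
    if (!LOCK_FREE(&tab))
        pthread_rwlock_destroy(&tab.rwlock);
    return 0;
}

Running a release with erl +S 1 pins the VM to one normal scheduler, which is the configuration this shortcut targets.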
-rw-r--r--  erts/emulator/beam/erl_db.c       169
-rw-r--r--  erts/emulator/beam/erl_db_hash.c    6
-rw-r--r--  erts/emulator/beam/erl_db_util.h    2
-rw-r--r--  lib/stdlib/test/ets_SUITE.erl      36
4 files changed, 127 insertions, 86 deletions
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 285c23ea01..04b0b1aa57 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -410,10 +410,13 @@ free_dbtable(void *vtb)
sizeof(DbTable) == erts_flxctr_read_approx(&tb->common.counters,
ERTS_DB_TABLE_MEM_COUNTER_ID));
- erts_rwmtx_destroy(&tb->common.rwlock);
- erts_mtx_destroy(&tb->common.fixlock);
ASSERT(is_immed(tb->common.heir_data));
+ if (!DB_LOCK_FREE(tb)) {
+ erts_rwmtx_destroy(&tb->common.rwlock);
+ erts_mtx_destroy(&tb->common.fixlock);
+ }
+
if (tb->common.btid)
erts_bin_release(tb->common.btid);
@@ -592,74 +595,79 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
{
erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
if (use_frequent_read_lock)
- rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
- rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
- tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
- erts_mtx_init(&tb->common.fixlock, "db_tab_fix",
- tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
+ if (!DB_LOCK_FREE(tb)) {
+ erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ }
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
+ ASSERT(!DB_LOCK_FREE(tb) || tb->common.is_thread_safe);
}
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
+ if (DB_LOCK_FREE(tb))
+ return;
if (tb->common.type & DB_FINE_LOCKED) {
- if (kind == LCK_WRITE) {
- erts_rwmtx_rwlock(&tb->common.rwlock);
- tb->common.is_thread_safe = 1;
- } else {
- erts_rwmtx_rlock(&tb->common.rwlock);
- ASSERT(!tb->common.is_thread_safe);
- }
+ if (kind == LCK_WRITE) {
+ erts_rwmtx_rwlock(&tb->common.rwlock);
+ tb->common.is_thread_safe = 1;
+ } else {
+ erts_rwmtx_rlock(&tb->common.rwlock);
+ ASSERT(!tb->common.is_thread_safe);
+ }
}
else
{
- switch (kind) {
- case LCK_WRITE:
- case LCK_WRITE_REC:
- erts_rwmtx_rwlock(&tb->common.rwlock);
- break;
- default:
- erts_rwmtx_rlock(&tb->common.rwlock);
- }
- ASSERT(tb->common.is_thread_safe);
+ switch (kind) {
+ case LCK_WRITE:
+ case LCK_WRITE_REC:
+ erts_rwmtx_rwlock(&tb->common.rwlock);
+ break;
+ default:
+ erts_rwmtx_rlock(&tb->common.rwlock);
+ }
+ ASSERT(tb->common.is_thread_safe);
}
}
static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
{
- /*
- * In NON-SMP case tb may refer to an already deallocated
- * DbTable structure. That is, ONLY the SMP case is allowed
- * to follow the tb pointer!
- */
+ if (DB_LOCK_FREE(tb))
+ return;
if (tb->common.type & DB_FINE_LOCKED) {
- if (kind == LCK_WRITE) {
- ASSERT(tb->common.is_thread_safe);
- tb->common.is_thread_safe = 0;
- erts_rwmtx_rwunlock(&tb->common.rwlock);
- }
- else {
- ASSERT(!tb->common.is_thread_safe);
- erts_rwmtx_runlock(&tb->common.rwlock);
- }
+ if (kind == LCK_WRITE) {
+ ASSERT(tb->common.is_thread_safe);
+ tb->common.is_thread_safe = 0;
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ }
+ else {
+ ASSERT(!tb->common.is_thread_safe);
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ }
}
else {
- ASSERT(tb->common.is_thread_safe);
- switch (kind) {
- case LCK_WRITE:
- case LCK_WRITE_REC:
- erts_rwmtx_rwunlock(&tb->common.rwlock);
- break;
- default:
- erts_rwmtx_runlock(&tb->common.rwlock);
- }
+ ASSERT(tb->common.is_thread_safe);
+ switch (kind) {
+ case LCK_WRITE:
+ case LCK_WRITE_REC:
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
+ break;
+ default:
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ }
}
}
static ERTS_INLINE int db_is_exclusive(DbTable* tb, db_lock_kind_t kind)
{
+ if (DB_LOCK_FREE(tb))
+ return 1;
+
return kind != LCK_READ && tb->common.is_thread_safe;
}
@@ -902,9 +910,9 @@ done:
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
erts_refc_inc(&tb->common.fix_count, 1);
-}
+}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
-{
+{
if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
@@ -1661,13 +1669,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
if (tp[1] == am_keypos
&& is_small(tp[2]) && (signed_val(tp[2]) > 0)) {
keypos = signed_val(tp[2]);
- }
+ }
else if (tp[1] == am_write_concurrency) {
- if (tp[2] == am_true) {
- is_fine_locked = 1;
- } else if (tp[2] == am_false) {
+ if (tp[2] == am_true) {
+ is_fine_locked = 1;
+ } else if (tp[2] == am_false) {
+ is_fine_locked = 0;
+ } else break;
+ if (DB_LOCK_FREE(NULL))
is_fine_locked = 0;
- } else break;
}
else if (tp[1] == am_read_concurrency) {
if (tp[2] == am_true) {
@@ -2201,7 +2211,7 @@ static void delete_all_objects_continue(Process* p, DbTable* tb)
SWord initial_reds = ERTS_BIF_REDS_LEFT(p);
SWord reds = initial_reds;
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock) || DB_LOCK_FREE(tb));
if ((tb->common.status & (DB_DELETE|DB_BUSY)) != DB_BUSY)
return;
@@ -3848,7 +3858,10 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
db_lock(tb, LCK_WRITE_REC);
if (!(tb->common.status & DB_DELETE)) {
erts_aint_t diff;
- erts_mtx_lock(&tb->common.fixlock);
+ int use_locks = !DB_LOCK_FREE(tb);
+
+ if (use_locks)
+ erts_mtx_lock(&tb->common.fixlock);
ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
@@ -3858,7 +3871,9 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
+
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
work += db_unfix_table_hash(&(tb->hash));
}
@@ -4007,8 +4022,11 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks, void **yield_stat
static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
-    erts_mtx_lock(&tb->common.fixlock);
+    int use_locks = !DB_LOCK_FREE(tb);
+
+    if (use_locks)
+        erts_mtx_lock(&tb->common.fixlock);
erts_refc_inc(&tb->common.fix_count,1);
fix = tb->common.fixing_procs;
if (fix == NULL) {
@@ -4021,8 +4039,8 @@ static void fix_table_locked(Process* p, DbTable* tb)
if (fix) {
ASSERT(fixed_tabs_find(NULL, fix));
++(fix->counter);
-
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
return;
}
}
@@ -4035,7 +4053,9 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix->counter = 1;
fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
+
p->flags |= F_USING_DB;
fixed_tabs_insert(p, fix);
@@ -4047,8 +4067,11 @@ static void unfix_table_locked(Process* p, DbTable* tb,
db_lock_kind_t* kind_p)
{
DbFixation* fix;
-    erts_mtx_lock(&tb->common.fixlock);
+    int use_locks = !DB_LOCK_FREE(tb);
+
+    if (use_locks)
+        erts_mtx_lock(&tb->common.fixlock);
fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
if (fix) {
@@ -4057,7 +4080,8 @@ static void unfix_table_locked(Process* p, DbTable* tb,
ASSERT(fix->counter >= 0);
if (fix->counter == 0) {
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
fixed_tabs_delete(p, fix);
erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
@@ -4068,15 +4092,18 @@ static void unfix_table_locked(Process* p, DbTable* tb,
goto unlocked;
}
}
- erts_mtx_unlock(&tb->common.fixlock);
+ if (use_locks)
+ erts_mtx_unlock(&tb->common.fixlock);
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
&& erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
- erts_rwmtx_runlock(&tb->common.rwlock);
- erts_rwmtx_rwlock(&tb->common.rwlock);
+ if (use_locks) {
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
+ }
*kind_p = LCK_WRITE;
if (tb->common.status & (DB_DELETE|DB_BUSY))
return;
@@ -4148,7 +4175,7 @@ static SWord free_fixations_locked(Process* p, DbTable *tb)
{
struct free_fixations_ctx ctx;
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock) || DB_LOCK_FREE(tb));
ctx.p = p;
ctx.tb = tb;
@@ -4358,7 +4385,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
= ERTS_IS_ATOM_STR("safe_fixed_monotonic_time",
What))
|| ERTS_IS_ATOM_STR("safe_fixed", What)) {
- erts_mtx_lock(&tb->common.fixlock);
+ if (!DB_LOCK_FREE(tb))
+ erts_mtx_lock(&tb->common.fixlock);
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
@@ -4400,7 +4428,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else {
ret = am_false;
}
- erts_mtx_unlock(&tb->common.fixlock);
+ if (!DB_LOCK_FREE(tb))
+ erts_mtx_unlock(&tb->common.fixlock);
} else if (ERTS_IS_ATOM_STR("stats",What)) {
if (IS_HASH_TABLE(tb->common.status)) {
FloatDef f;
@@ -4607,6 +4636,8 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
#ifdef ERTS_ENABLE_LOCK_COUNT
void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
+ if (DB_LOCK_FREE(tb))
+ return;
if(enable) {
erts_lcnt_install_new_lock_info(&tb->common.rwlock.lcnt, "db_tab",
tb->common.the_name, ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 5937bd64ec..17d20b2f77 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -271,7 +271,7 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck)
# define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd))
# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck))
-# define IS_TAB_WLOCKED(tb) erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
+# define IS_TAB_WLOCKED(tb) (DB_LOCK_FREE(tb) || erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock))
#else
# define IS_HASH_RLOCKED(tb,hval) (1)
# define IS_HASH_WLOCKED(tb,hval) (1)
@@ -628,7 +628,7 @@ SWord db_unfix_table_hash(DbTableHash *tb)
FixedDeletion* fixdel;
SWord work = 0;
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb)
|| (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock)
&& !tb->common.is_thread_safe));
restart:
@@ -2814,7 +2814,7 @@ begin_resizing(DbTableHash* tb)
if (DB_USING_FINE_LOCKING(tb))
return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
else
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
return 1;
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index f038dba89a..06dbe54212 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -344,6 +344,8 @@ typedef struct db_table_common {
#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
+#define DB_LOCK_FREE(T) (erts_no_schedulers == 1)
+
/*
* tplp is an untagged pointer to a tuple we know is large enough
* and dth is a pointer to a DbTableHash.
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index 4fcc0e65ea..b7fe664f12 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -2559,10 +2559,10 @@ write_concurrency(Config) when is_list(Config) ->
NoHashMem = ets:info(No8,memory),
NoHashMem = ets:info(No9,memory),
- true = YesMem > NoHashMem,
- true = YesMem > NoTreeMem,
+ true = YesMem > NoHashMem orelse erlang:system_info(schedulers) == 1,
+ true = YesMem > NoTreeMem orelse erlang:system_info(schedulers) == 1,
true = YesMem > YesTreeMem,
- true = YesTreeMem < NoTreeMem,
+ true = YesTreeMem < NoTreeMem orelse erlang:system_info(schedulers) == 1,
{'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency,foo}])),
{'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency}])),
@@ -4612,10 +4612,18 @@ test_table_counter_concurrency(WhatToTest) ->
ok.
test_table_size_concurrency(Config) when is_list(Config) ->
- test_table_counter_concurrency(size).
+ case erlang:system_info(schedulers) of
+ 1 -> {skip,"Only valid on smp > 1 systems"};
+ _ ->
+ test_table_counter_concurrency(size)
+ end.
test_table_memory_concurrency(Config) when is_list(Config) ->
- test_table_counter_concurrency(memory).
+ case erlang:system_info(schedulers) of
+ 1 -> {skip,"Only valid on smp > 1 systems"};
+ _ ->
+ test_table_counter_concurrency(memory)
+ end.
%% Tests that calling the ets:delete operation on a table T with
%% decentralized counters works while ets:info(T, size) operations are
@@ -4756,7 +4764,7 @@ tab2file_do(FName, Opts, TableType) ->
true = ets:info(Tab2, compressed),
Smp = erlang:system_info(smp_support),
Smp = ets:info(Tab2, read_concurrency),
- Smp = ets:info(Tab2, write_concurrency),
+ Smp = ets:info(Tab2, write_concurrency) orelse erlang:system_info(schedulers) == 1,
true = ets:delete(Tab2),
verify_etsmem(EtsMem).
@@ -8159,15 +8167,15 @@ ets_new(Name, Opts, KeyRange) ->
ets_new(Name, Opts, KeyRange, fun id/1).
ets_new(Name, Opts0, KeyRange, KeyFun) ->
- {CATree, Stimulate, RevOpts} =
- lists:foldl(fun(cat_ord_set, {false, false, Lacc}) ->
- {true, false, [ordered_set | Lacc]};
- (stim_cat_ord_set, {false, false, Lacc}) ->
- {true, true, [ordered_set | Lacc]};
- (Other, {CAT, STIM, Lacc}) ->
- {CAT, STIM, [Other | Lacc]}
+ {_Smp, CATree, Stimulate, RevOpts} =
+ lists:foldl(fun(cat_ord_set, {Smp, false, false, Lacc}) ->
+ {Smp, Smp, false, [ordered_set | Lacc]};
+ (stim_cat_ord_set, {Smp, false, false, Lacc}) ->
+ {Smp, Smp, Smp, [ordered_set | Lacc]};
+ (Other, {Smp, CAT, STIM, Lacc}) ->
+ {Smp, CAT, STIM, [Other | Lacc]}
end,
- {false, false, []},
+ {erlang:system_info(schedulers) > 1,false, false, []},
Opts0),
Opts = lists:reverse(RevOpts),
EtsNewHelper =