author    Lukas Larsson <lukas@erlang.org>    2019-11-15 11:22:40 +0100
committer Lukas Larsson <lukas@erlang.org>    2019-11-15 11:31:13 +0100
commit    cd0344c6a2c76ff76f988b926b24b8ae53e9f859 (patch)
tree      ec5f085896742e1e7bfe62f2843ac2c76ba0212a
parent    090efc1d2d21ed917caf07898c28c1bf7b884a8e (diff)
erts: Optimize meta table lock to not be taken when +S 1
ETS tables can only be accessed from normal (non-dirty) schedulers, so when there is only one normal scheduler we do not have to take any locks on the meta table.
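
The trick is plain lock elision: the scheduler count is fixed at startup, so when it is 1 the meta-table rwlock can be skipped on every path. Below is a minimal, self-contained C sketch of that pattern under stated assumptions: no_schedulers, meta_rwlock, meta_table and lookup are hypothetical stand-ins for erts_no_schedulers, the meta_name_tab_rwlocks, meta_name_tab and db_get_table_aux, and a POSIX rwlock stands in for erts_rwmtx.

#include <pthread.h>
#include <stdio.h>

static int no_schedulers = 1;                /* fixed at startup, like erts_no_schedulers */
#define META_DB_LOCK_FREE() (no_schedulers == 1)

static pthread_rwlock_t meta_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static int meta_table[16];                   /* stand-in for meta_name_tab */

static int lookup(int ix)
{
    int v;
    if (!META_DB_LOCK_FREE())
        pthread_rwlock_rdlock(&meta_rwlock); /* more than one scheduler: lock for real */
    v = meta_table[ix & 15];                 /* read the meta table */
    if (!META_DB_LOCK_FREE())
        pthread_rwlock_unlock(&meta_rwlock);
    return v;
}

int main(void)
{
    meta_table[3] = 42;
    printf("%d\n", lookup(3));               /* with one scheduler, no lock is ever taken */
    return 0;
}

This is safe only because the count never changes at runtime and because a single kind of thread (a normal scheduler) may touch the table, which is exactly what the new ASSERT in meta_name_tab_bucket() enforces.
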
-rw-r--r--   erts/emulator/beam/erl_db.c        28
-rw-r--r--   erts/emulator/beam/erl_db_util.h    3
2 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 04b0b1aa57..b4a97b42c8 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -342,6 +342,9 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
{
unsigned bix = atom_val(name) & meta_name_tab_mask;
struct meta_name_tab_entry* bucket = &meta_name_tab[bix];
+ /* Only non-dirty schedulers are allowed to access the meta table.
+    The +S 1 optimizations for ETS depend on that. */
+ ASSERT(erts_get_scheduler_data() && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
*lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck;
return bucket;
}
@@ -705,20 +708,23 @@ DbTable* db_get_table_aux(Process *p,
DbTable *tb;
/*
- * IMPORTANT: Only scheduler threads are allowed
- * to access tables. Memory management
- * depend on it.
+ * IMPORTANT: Only non-dirty scheduler threads are allowed
+ * to access tables. Memory management depends on it.
*/
- ASSERT(erts_get_scheduler_data());
+ ASSERT(erts_get_scheduler_data() && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+
+ if (META_DB_LOCK_FREE())
+ meta_already_locked = 1;
if (is_atom(id)) {
erts_rwmtx_t *mtl;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
erts_rwmtx_rlock(mtl);
- else{
+ else {
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
- || erts_lc_rwmtx_is_rwlocked(mtl));
+ || erts_lc_rwmtx_is_rwlocked(mtl)
+ || META_DB_LOCK_FREE());
}
tb = NULL;
if (bucket->pu.tb != NULL) {
@@ -780,6 +786,10 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
+
+ if (META_DB_LOCK_FREE())
+ have_lock = 1;
+
if (!have_lock)
erts_rwmtx_rwlock(rwlock);
@@ -841,13 +851,17 @@ static int remove_named_tab(DbTable *tb, int have_lock)
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
ASSERT(is_table_named(tb));
+
+ if (META_DB_LOCK_FREE())
+ have_lock = 1;
+
if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
erts_rwmtx_rwlock(rwlock);
db_lock(tb, LCK_WRITE);
}
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock) || META_DB_LOCK_FREE());
if (bucket->pu.tb == NULL) {
goto done;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 06dbe54212..7846a5c98a 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -344,7 +344,8 @@ typedef struct db_table_common {
#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
-#define DB_LOCK_FREE() (erts_no_schedulers == 1)
+#define META_DB_LOCK_FREE() (erts_no_schedulers == 1)
+#define DB_LOCK_FREE(T) META_DB_LOCK_FREE()
/*
* tplp is an untagged pointer to a tuple we know is large enough
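
Design note: rather than branching around every lock and unlock call, the patch folds the optimization into the existing flags by forcing meta_already_locked and have_lock to 1 when META_DB_LOCK_FREE() holds, so the locked and unlocked code paths are reused unchanged; the lock-checker assertions are extended with || META_DB_LOCK_FREE() so they still pass in the single-scheduler case. The new ASSERT in meta_name_tab_bucket() is what makes the elision sound: dirty schedulers must never reach the meta table, so under +S 1 exactly one thread can ever touch it.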