author     John Högberg <john@erlang.org>  2022-12-06 14:17:29 +0100
committer  John Högberg <john@erlang.org>  2022-12-12 14:54:20 +0100
commit     e444d1f9e4e782e9ff1407f61251ac79a5cf3ae5 (patch)
tree       2a51b138d332529740e3d22e774229fe4b4dc5a8
parent     fc3295b1ebeed1235d12983184ba806f2ce2f525 (diff)
erts: Fix race in fun loading
Because funs are tied to their canonical module rather than a specific instance of a module, they are "moved over" to the newest instance whenever we reload _the exact same module_ over itself.

This quirk, together with how fun entries are updated when a module is loaded, left a small window in which a holder of an "old" fun could call the new instance before it was fully loaded. This commit closes that window by versioning funs in roughly the same way we already version exports.

It also fixes a related bug where fun purging could be corrupted if a new module instance was loaded in the middle of purging, solving that issue by disallowing code loading while purging a module with funs.
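For readers unfamiliar with the dispatch tables involved, the following minimal C sketch (not part of the patch) illustrates the idea behind versioned fun dispatch: every fun entry holds one address per code index, the loader writes only the staging slot, and callers always dispatch through the active slot, so a half-loaded instance can never be reached. All names here (fun_entry, active_ix, stage_new_code, and so on) are invented stand-ins for the real ErlFunEntry/ErtsDispatchable machinery, which uses more code indices plus atomics and barriers.

#include <stdio.h>

#define NUM_CODE_IX 2                /* the real emulator has more slots */

typedef void (*code_ptr)(void);

struct fun_entry {
    code_ptr addresses[NUM_CODE_IX]; /* one address per code index */
};

static int active_ix = 0;            /* index used by running processes */

static void old_code(void) { puts("executing old, fully loaded code"); }
static void new_code(void) { puts("executing new code (after commit)"); }

/* Loader: write the new address into the *staging* slot only. */
static void stage_new_code(struct fun_entry *fe)
{
    int staging_ix = (active_ix + 1) % NUM_CODE_IX;
    fe->addresses[staging_ix] = new_code;
}

/* Commit: flip the active index once the module is fully loaded. */
static void commit_staging(void)
{
    active_ix = (active_ix + 1) % NUM_CODE_IX;
}

/* Callers always dispatch through the active index, so they can never
 * reach a half-loaded instance. */
static void call_fun(const struct fun_entry *fe)
{
    fe->addresses[active_ix]();
}

int main(void)
{
    struct fun_entry fe = {{old_code, old_code}};

    call_fun(&fe);        /* old code */
    stage_new_code(&fe);  /* reload the same module over itself... */
    call_fun(&fe);        /* ...still old code: the race is gone */
    commit_staging();
    call_fun(&fe);        /* new code, now fully loaded */
    return 0;
}

This mirrors how export entries are already dispatched, which is what the commit message means by versioning funs "in roughly the same way we already version exports".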
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 413
-rw-r--r--  erts/emulator/beam/beam_bp.c | 6
-rw-r--r--  erts/emulator/beam/beam_common.h | 3
-rw-r--r--  erts/emulator/beam/beam_debug.c | 6
-rw-r--r--  erts/emulator/beam/beam_load.c | 4
-rw-r--r--  erts/emulator/beam/bif.c | 2
-rw-r--r--  erts/emulator/beam/code_ix.c | 290
-rw-r--r--  erts/emulator/beam/code_ix.h | 73
-rw-r--r--  erts/emulator/beam/emu/bif_instrs.tab | 4
-rw-r--r--  erts/emulator/beam/emu/emu_load.c | 13
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 10
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 32
-rw-r--r--  erts/emulator/beam/erl_fun.c | 232
-rw-r--r--  erts/emulator/beam/erl_fun.h | 10
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 3
-rw-r--r--  erts/emulator/beam/erl_nif.c | 19
-rw-r--r--  erts/emulator/beam/export.c | 14
-rw-r--r--  erts/emulator/beam/global.h | 3
-rw-r--r--  erts/emulator/beam/jit/arm/beam_asm_global.hpp.pl | 3
-rw-r--r--  erts/emulator/beam/jit/arm/instr_call.cpp | 6
-rw-r--r--  erts/emulator/beam/jit/arm/instr_fun.cpp | 13
-rw-r--r--  erts/emulator/beam/jit/asm_load.c | 12
-rw-r--r--  erts/emulator/beam/jit/beam_jit_common.cpp | 4
-rw-r--r--  erts/emulator/beam/jit/beam_jit_main.cpp | 8
-rwxr-xr-x  erts/emulator/beam/jit/x86/beam_asm_global.hpp.pl | 3
-rw-r--r--  erts/emulator/beam/jit/x86/instr_call.cpp | 4
-rw-r--r--  erts/emulator/beam/jit/x86/instr_fun.cpp | 13
-rw-r--r--  erts/emulator/beam/module.c | 5
-rw-r--r--  erts/emulator/internal_doc/CodeLoading.md | 8
-rw-r--r--  erts/emulator/internal_doc/Tracing.md | 19
-rw-r--r--  erts/emulator/test/code_SUITE.erl | 30
-rw-r--r--  erts/emulator/test/code_SUITE_data/call_fun_before_load.erl | 45
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/sensitive_SUITE.erl | 2
-rw-r--r--  erts/etc/unix/etp.py | 8
35 files changed, 871 insertions, 455 deletions
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 0e33dfa667..c196d51bfe 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -57,7 +57,7 @@ static struct {
ErlFunEntry **funs;
ErlFunEntry *def_funs[10];
Uint fe_size;
- Uint fe_ix;
+ Uint fe_count;
struct erl_module_instance saved_old;
} purge_state;
@@ -88,7 +88,7 @@ init_purge_state(void)
purge_state.funs = &purge_state.def_funs[0];
purge_state.fe_size = sizeof(purge_state.def_funs) / sizeof(purge_state.def_funs[0]);
- purge_state.fe_ix = 0;
+ purge_state.fe_count = 0;
purge_state.saved_old.code_hdr = 0;
}
@@ -305,7 +305,7 @@ struct m {
static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int);
static void smp_code_ix_commiter(void*);
-static struct /* Protected by code_write_permission */
+static struct /* Protected by code loading permission */
{
Process* stager;
ErtsThrPrgrLaterOp lop;
@@ -340,8 +340,9 @@ finish_loading_1(BIF_ALIST_1)
int is_blocking = 0;
int do_commit = 0;
- if (!erts_try_seize_code_write_permission(BIF_P)) {
- ERTS_BIF_YIELD1(BIF_TRAP_EXPORT(BIF_finish_loading_1), BIF_P, BIF_ARG_1);
+ if (!erts_try_seize_code_load_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(BIF_TRAP_EXPORT(BIF_finish_loading_1),
+ BIF_P, BIF_ARG_1);
}
/*
@@ -356,11 +357,11 @@ finish_loading_1(BIF_ALIST_1)
n = erts_list_length(BIF_ARG_1);
if (n < 0) {
badarg:
- if (p) {
- erts_free(ERTS_ALC_T_LOADER_TMP, p);
- }
- erts_release_code_write_permission();
- BIF_ERROR(BIF_P, BADARG);
+ if (p) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, p);
+ }
+ erts_release_code_load_permission();
+ BIF_ERROR(BIF_P, BADARG);
}
p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));
@@ -392,13 +393,13 @@ finish_loading_1(BIF_ALIST_1)
*/
if (n > 1) {
- for (i = 0; i < n; i++) {
- if (erts_has_code_on_load(p[i].code) == am_true) {
- erts_free(ERTS_ALC_T_LOADER_TMP, p);
- erts_release_code_write_permission();
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
- }
+ for (i = 0; i < n; i++) {
+ if (erts_has_code_on_load(p[i].code) == am_true) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, p);
+ erts_release_code_load_permission();
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
+ }
}
/*
@@ -524,7 +525,7 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
if (is_blocking) {
erts_thr_progress_unblock();
}
- erts_release_code_write_permission();
+ erts_release_code_load_permission();
return res;
}
else {
@@ -562,7 +563,7 @@ static void smp_code_ix_commiter(void* null)
#ifdef DEBUG
committer_state.stager = NULL;
#endif
- erts_release_code_write_permission();
+ erts_release_code_load_permission();
erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(p)) {
erts_resume(p, ERTS_PROC_LOCK_STATUS);
@@ -700,7 +701,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
- if (!erts_try_seize_code_write_permission(BIF_P)) {
+ if (!erts_try_seize_code_load_permission(BIF_P)) {
ERTS_BIF_YIELD1(BIF_TRAP_EXPORT(BIF_delete_module_1), BIF_P, BIF_ARG_1);
}
@@ -837,7 +838,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
- if (!erts_try_seize_code_write_permission(BIF_P)) {
+ if (!erts_try_seize_code_load_permission(BIF_P)) {
ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_finish_after_on_load_2),
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
@@ -848,7 +849,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
if (!modp || !modp->on_load || !(modp->on_load)->code_hdr
|| !((modp->on_load)->code_hdr)->on_load) {
- erts_release_code_write_permission();
+ erts_release_code_load_permission();
BIF_ERROR(BIF_P, BADARG);
}
@@ -945,7 +946,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
ep->trampoline.not_loaded.deferred = 0;
}
}
- erts_release_code_write_permission();
+ erts_release_code_load_permission();
BIF_RET(am_true);
}
@@ -1751,19 +1752,19 @@ void
erts_purge_state_add_fun(ErlFunEntry *fe)
{
ASSERT(is_value(purge_state.module));
- if (purge_state.fe_ix >= purge_state.fe_size) {
+ if (purge_state.fe_count >= purge_state.fe_size) {
ErlFunEntry **funs;
purge_state.fe_size += 100;
funs = erts_alloc(ERTS_ALC_T_PURGE_DATA,
sizeof(ErlFunEntry *)*purge_state.fe_size);
sys_memcpy((void *) funs,
(void *) purge_state.funs,
- purge_state.fe_ix*sizeof(ErlFunEntry *));
+ purge_state.fe_count*sizeof(ErlFunEntry *));
if (purge_state.funs != &purge_state.def_funs[0])
erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs);
purge_state.funs = funs;
}
- purge_state.funs[purge_state.fe_ix++] = fe;
+ purge_state.funs[purge_state.fe_count++] = fe;
}
Export *
@@ -1846,7 +1847,7 @@ finalize_purge_operation(Process *c_p, int succeded)
purge_state.fe_size = sizeof(purge_state.def_funs);
purge_state.fe_size /= sizeof(purge_state.def_funs[0]);
}
- purge_state.fe_ix = 0;
+ purge_state.fe_count = 0;
}
@@ -1864,7 +1865,12 @@ resume_purger(void *unused)
static void
finalize_purge_abort(void *unused)
{
- erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
+ /* We're not supposed to land here if we don't have any funs to abort
+ * purging for. */
+ ASSERT(purge_state.fe_count > 0);
+
+ erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_count);
+ erts_release_code_stage_permission();
finalize_purge_operation(NULL, 0);
@@ -1874,176 +1880,189 @@ finalize_purge_abort(void *unused)
BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
{
- if (BIF_P != erts_code_purger)
- BIF_ERROR(BIF_P, EXC_NOTSUP);
+ if (BIF_P != erts_code_purger) {
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+ }
- if (is_not_atom(BIF_ARG_1))
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_atom(BIF_ARG_1)) {
+ goto raise_badarg;
+ }
switch (BIF_ARG_2) {
case am_prepare:
case am_prepare_on_load: {
- /*
- * Prepare for purge by marking all fun
- * entries referring to the code to purge
- * with "pending purge" markers.
- */
- ErtsCodeIndex code_ix;
- Module* modp;
- Eterm res;
-
- if (is_value(purge_state.module))
- BIF_ERROR(BIF_P, BADARG);
+ /* Prepare for purge by marking all fun entries referring to the code
+ * to purge with "pending purge" markers. */
+ ErtsCodeIndex code_ix;
+ Module* modp;
+ Eterm res;
+
+ if (is_value(purge_state.module)) {
+ goto raise_badarg;
+ }
- code_ix = erts_active_code_ix();
+ /* Fun purging requires that we don't stage new code while any purge
+ * markers are alive, lest we kill them by reloading a new module on
+ * top of an old instance of the same module. */
+ if (!erts_try_seize_code_stage_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_erts_internal_purge_module_2),
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
- /*
- * Correct module?
- */
- modp = erts_get_module(BIF_ARG_1, code_ix);
- if (!modp)
- res = am_false;
- else {
- /*
- * Any code to purge?
- */
+ code_ix = erts_active_code_ix();
- if (BIF_ARG_2 == am_prepare_on_load) {
+ /* Correct module? */
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+ if (!modp) {
+ res = am_false;
+ } else {
+ /* Any code to purge? */
+ if (BIF_ARG_2 == am_prepare_on_load) {
erts_rwlock_old_code(code_ix);
- } else {
- erts_rlock_old_code(code_ix);
- }
+ } else {
+ erts_rlock_old_code(code_ix);
+ }
- if (BIF_ARG_2 == am_prepare_on_load) {
- ASSERT(modp->on_load);
- ASSERT(modp->on_load->code_hdr);
- purge_state.saved_old = modp->old;
- modp->old = *modp->on_load;
- erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
- modp->on_load = 0;
- }
+ if (BIF_ARG_2 == am_prepare_on_load) {
+ ASSERT(modp->on_load);
+ ASSERT(modp->on_load->code_hdr);
+ purge_state.saved_old = modp->old;
+ modp->old = *modp->on_load;
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
+ modp->on_load = 0;
+ }
- if (!modp->old.code_hdr)
- res = am_false;
- else {
- erts_mtx_lock(&purge_state.mtx);
- purge_state.module = BIF_ARG_1;
- erts_mtx_unlock(&purge_state.mtx);
- res = am_true;
- erts_fun_purge_prepare(&modp->old);
- }
+ if (!modp->old.code_hdr) {
+ res = am_false;
+ } else {
+ erts_mtx_lock(&purge_state.mtx);
+ purge_state.module = BIF_ARG_1;
+ erts_mtx_unlock(&purge_state.mtx);
+
+ /* Because fun calls always land in the latest instance, there
+ * is no need to set up purge markers if there's current code
+ * for this module. */
+ if (!modp->curr.code_hdr) {
+ /* Set up "pending purge" markers for the funs in this
+ * module. Processes trying to call these funs will be
+ * suspended _before_ calling them, which will then either
+ * crash or succeed when resumed after the purge finishes
+ * or is aborted.
+ *
+ * This guarantees that we won't get any more direct
+ * references into the code while checking for such
+ * funs. */
+ erts_fun_purge_prepare(&modp->old);
+ }
+
+ res = am_true;
+ }
if (BIF_ARG_2 == am_prepare_on_load) {
erts_rwunlock_old_code(code_ix);
- } else {
+ } else {
erts_runlock_old_code(code_ix);
- }
- }
-
- if (res != am_true)
- BIF_RET(res);
- else {
- /*
- * We'll be resumed when all schedulers are guaranteed
- * to see the "pending purge" markers that we've made on
- * all fun entries of the code that we are about to purge.
- * Processes trying to call these funs will be suspended
- * before calling the funs. That is we are guaranteed not
- * to get any more direct references into the code while
- * checking for such references...
- */
- erts_schedule_thr_prgr_later_op(resume_purger,
- NULL,
- &purger_lop_data);
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- }
- }
-
- case am_abort: {
- /*
- * Soft purge that detected direct references into the code
- * we set out to purge. Abort the purge.
- */
+ }
+ }
- if (purge_state.module != BIF_ARG_1)
- BIF_ERROR(BIF_P, BADARG);
+ if (res == am_true) {
+ if (purge_state.fe_count == 0) {
+ /* No funs to purge, so we can safely release stage permission
+ * and allow code to be loaded while checking process code. */
+ erts_release_code_stage_permission();
+ }
- erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);
+ /* Resume ourselves when all schedulers are guaranteed to either
+ * call the newest instance of the module, or see the "pending
+ * purge" markers that we set on all fun entries related to the
+ * code we're about to purge. */
+ erts_schedule_thr_prgr_later_op(resume_purger,
+ NULL,
+ &purger_lop_data);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ }
- /*
- * We need to restore the code addresses of the funs in
- * two stages in order to ensure that we do not get any
- * stale suspended processes due to the purge abort.
- * Restore address pointer (erts_fun_purge_abort_prepare);
- * wait for thread progress; clear pending purge address
- * pointer (erts_fun_purge_abort_finalize), and then
- * resume processes that got suspended
- * (finalize_purge_operation).
- */
- erts_schedule_thr_prgr_later_op(finalize_purge_abort,
- NULL,
- &purger_lop_data);
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
+ erts_release_code_stage_permission();
+ BIF_RET(res);
}
- case am_complete: {
- ErtsCodeIndex code_ix;
- Module* modp;
- int is_blocking = 0;
- Eterm ret;
- ErtsLiteralArea *literals = NULL;
-
-
- /*
- * We have no direct references into the code.
- * Complete to purge.
- */
+ case am_abort: {
+ /* Soft purge that detected direct references into the code we set out
+ * to purge. Abort the purge. */
+ if (purge_state.module != BIF_ARG_1) {
+ goto raise_badarg;
+ }
- if (purge_state.module != BIF_ARG_1)
- BIF_ERROR(BIF_P, BADARG);
+ if (purge_state.fe_count > 0) {
+ erts_fun_purge_abort_prepare(purge_state.funs,
+ purge_state.fe_count);
+
+ /* We need to restore the code addresses of the funs in two stages
+ * to ensure that we do not get any stale suspended processes due
+ * to the purge abort.
+ *
+ * Restore address pointer (erts_fun_purge_abort_prepare); wait for
+ * thread progress; clear pending purge address pointer
+ * (erts_fun_purge_abort_finalize), and then resume processes that
+ * got suspended (finalize_purge_operation). */
+ erts_schedule_thr_prgr_later_op(finalize_purge_abort,
+ NULL,
+ &purger_lop_data);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
+ }
- if (!erts_try_seize_code_write_permission(BIF_P)) {
- ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_erts_internal_purge_module_2),
- BIF_P, BIF_ARG_1, BIF_ARG_2);
- }
+ /* No funs to restore, just clean up and return. */
+ finalize_purge_operation(NULL, 0);
+ BIF_RET(am_false);
+ }
- code_ix = erts_active_code_ix();
+ case am_complete: {
+ ErtsCodeIndex code_ix;
+ Module* modp;
+ int is_blocking = 0;
+ Eterm ret;
+ ErtsLiteralArea *literals = NULL;
+
+ /* We have no direct references into the code. Go ahead with the
+ * purge. */
+ if (purge_state.module != BIF_ARG_1) {
+ goto raise_badarg;
+ }
- /*
- * Correct module?
- */
+ if (!erts_try_seize_code_mod_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_erts_internal_purge_module_2),
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
- if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
- ERTS_BIF_PREP_RET(ret, am_false);
- }
- else {
+ code_ix = erts_active_code_ix();
- erts_rwlock_old_code(code_ix);
+ /* Correct module? */
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
+ ERTS_BIF_PREP_RET(ret, am_false);
+ } else {
+ erts_rwlock_old_code(code_ix);
- /*
- * Any code to purge?
- */
- if (!modp->old.code_hdr) {
- ERTS_BIF_PREP_RET(ret, am_false);
- }
- else {
- /*
- * Unload any NIF library
- */
+ /* Any code to purge? */
+ if (!modp->old.code_hdr) {
+ ERTS_BIF_PREP_RET(ret, am_false);
+ } else {
+ /* Unload any NIF library. */
if (modp->old.nif) {
- erts_unload_nif(modp->old.nif);
- modp->old.nif = NULL;
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
}
- /*
- * Remove the old code.
- */
- ASSERT(erts_total_code_size >= modp->old.code_length);
- erts_total_code_size -= modp->old.code_length;
- erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix);
+ /* Remove the old code. */
+ ASSERT(erts_total_code_size >= modp->old.code_length);
+ erts_total_code_size -= modp->old.code_length;
+
+ if (purge_state.fe_count > 0) {
+ erts_fun_purge_complete(purge_state.funs,
+ purge_state.fe_count);
+ }
beam_catches_delmod(modp->old.catches,
modp->old.code_hdr,
@@ -2060,54 +2079,66 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
code_hdr_rw->literal_area = NULL;
}
- erts_remove_from_ranges(modp->old.code_hdr);
+ erts_remove_from_ranges(modp->old.code_hdr);
if (modp->old.code_hdr->are_nifs) {
erts_free(ERTS_ALC_T_PREPARED_CODE,
modp->old.code_hdr->are_nifs);
}
+
#ifndef BEAMASM
erts_free(ERTS_ALC_T_CODE, (void *) modp->old.code_hdr);
#else
-# ifdef ADDRESS_SANITIZER
+# ifdef ADDRESS_SANITIZER
__lsan_unregister_root_region(modp->old.code_hdr,
modp->old.code_length);
-# endif
+# endif
beamasm_purge_module(modp->old.native_module_exec,
modp->old.native_module_rw);
#endif
- modp->old.code_hdr = NULL;
- modp->old.code_length = 0;
- modp->old.catches = BEAM_CATCHES_NIL;
- ERTS_BIF_PREP_RET(ret, am_true);
- }
+ modp->old.code_hdr = NULL;
+ modp->old.code_length = 0;
+ modp->old.catches = BEAM_CATCHES_NIL;
+ ERTS_BIF_PREP_RET(ret, am_true);
+ }
- if (purge_state.saved_old.code_hdr) {
- modp->old = purge_state.saved_old;
- purge_state.saved_old.code_hdr = 0;
- }
- erts_rwunlock_old_code(code_ix);
- }
- if (is_blocking) {
- erts_thr_progress_unblock();
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- }
+ if (purge_state.saved_old.code_hdr) {
+ modp->old = purge_state.saved_old;
+ purge_state.saved_old.code_hdr = 0;
+ }
- erts_release_code_write_permission();
+ erts_rwunlock_old_code(code_ix);
+ }
- finalize_purge_operation(BIF_P, ret == am_true);
+ if (is_blocking) {
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ }
+
+ erts_release_code_mod_permission();
+
+ if (purge_state.fe_count > 0) {
+ erts_release_code_stage_permission();
+ }
- if (literals) {
+ finalize_purge_operation(BIF_P, ret == am_true);
+
+ if (literals) {
erts_queue_release_literals(BIF_P, literals);
- }
+ }
- return ret;
+ return ret;
}
default:
- BIF_ERROR(BIF_P, BADARG);
+ raise_badarg:
+ if (purge_state.fe_count > 0) {
+ ASSERT(is_value(purge_state.module));
+ erts_release_code_stage_permission();
+ }
+ BIF_ERROR(BIF_P, BADARG);
}
}
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 023e67a96a..82bb8c5eb8 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -289,7 +289,7 @@ erts_consolidate_bp_data(BpFunctions* f, int local)
Uint i;
Uint n = f->matched;
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
for (i = 0; i < n; i++) {
consolidate_bp_data(fs[i].mod, fs[i].ci_rw, local);
@@ -1397,7 +1397,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
g = ci->u.gen_bp;
if (g == 0) {
int i;
@@ -1530,7 +1530,7 @@ clear_function_break(const ErtsCodeInfo *ci, Uint break_flags)
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
if ((g = ci->u.gen_bp) == NULL) {
return 1;
diff --git a/erts/emulator/beam/beam_common.h b/erts/emulator/beam/beam_common.h
index f3725d0d2e..17b700c348 100644
--- a/erts/emulator/beam/beam_common.h
+++ b/erts/emulator/beam/beam_common.h
@@ -280,7 +280,8 @@ void check_monitor_long_schedule(Process *c_p, Uint64 start_time,
extern ErtsCodePtr beam_run_process;
extern ErtsCodePtr beam_normal_exit;
extern ErtsCodePtr beam_exit;
-extern ErtsCodePtr beam_save_calls;
+extern ErtsCodePtr beam_save_calls_export;
+extern ErtsCodePtr beam_save_calls_fun;
extern ErtsCodePtr beam_bif_export_trap;
extern ErtsCodePtr beam_export_trampoline;
extern ErtsCodePtr beam_continue_exit;
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 5ba3da5f51..1cd0621b4e 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -165,7 +165,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
mfa.arity = signed_val(tp[3]);
}
- if (!erts_try_seize_code_write_permission(BIF_P)) {
+ if (!erts_try_seize_code_mod_permission(BIF_P)) {
ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_erts_debug_breakpoint_2),
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
@@ -188,7 +188,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
erts_thr_progress_unblock();
erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
return res;
error:
@@ -678,7 +678,7 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
case 'F': /* Function definition */
{
ErlFunEntry* fe = (ErlFunEntry *) *ap;
- const ErtsCodeMFA *cmfa = erts_get_fun_mfa(fe);
+ const ErtsCodeMFA *cmfa = erts_get_fun_mfa(fe, erts_active_code_ix());
erts_print(to, to_arg, "fun(`%T`:`%T`/%bpu)", cmfa->module,
cmfa->function, cmfa->arity);
ap++;
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index c7c0a715b5..5f35ac3a8e 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -215,8 +215,8 @@ erts_finish_loading(Binary* magic, Process* c_p,
struct erl_module_instance* inst_p;
Module* mod_tab_p;
- ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
- erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_load_permission() ||
+ erts_thr_progress_is_blocking());
/* Make current code for the module old and insert the new code
* as current. This will fail if there already exists old code
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index bb4d4c4776..41ba66062f 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -5497,7 +5497,7 @@ void erts_init_trap_export(Export *ep, Eterm m, Eterm f, Uint a,
}
#ifdef BEAMASM
- ep->dispatch.addresses[ERTS_SAVE_CALLS_CODE_IX] = beam_save_calls;
+ ep->dispatch.addresses[ERTS_SAVE_CALLS_CODE_IX] = beam_save_calls_export;
#endif
ep->bif_number = -1;
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index d1159dd144..936529353c 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -37,20 +37,24 @@
erts_atomic32_t the_active_code_index;
erts_atomic32_t the_staging_code_index;
-static int code_writing_seized = 0;
-static Process* code_writing_process = NULL;
-struct code_write_queue_item {
- Process *p;
- void (*aux_func)(void *);
- void *aux_arg;
- struct code_write_queue_item* next;
+struct code_permission {
+ erts_mtx_t lock;
+
+ ErtsSchedulerData *scheduler;
+ Process *owner;
+
+ int seized;
+ struct code_permission_queue_item {
+ Process *p;
+ void (*aux_func)(void *);
+ void *aux_arg;
+
+ struct code_permission_queue_item *next;
+ } *queue;
};
-static struct code_write_queue_item* code_write_queue = NULL;
-static erts_mtx_t code_write_permission_mtx;
-#ifdef ERTS_ENABLE_LOCK_CHECK
-static erts_tsd_key_t has_code_write_permission;
-#endif
+static struct code_permission code_mod_permission = {0};
+static struct code_permission code_stage_permission = {0};
void erts_code_ix_init(void)
{
@@ -60,28 +64,31 @@ void erts_code_ix_init(void)
*/
erts_atomic32_init_nob(&the_active_code_index, 0);
erts_atomic32_init_nob(&the_staging_code_index, 0);
- erts_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL,
+
+ erts_mtx_init(&code_mod_permission.lock,
+ "code_mod_permission", NIL,
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_tsd_key_create(&has_code_write_permission,
- "erts_has_code_write_permission");
-#endif
+ erts_mtx_init(&code_stage_permission.lock,
+ "code_stage_permission", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
CIX_TRACE("init");
}
void erts_start_staging_code_ix(int num_new)
{
beam_catches_start_staging();
+ erts_fun_start_staging();
export_start_staging();
module_start_staging();
erts_start_staging_ranges(num_new);
CIX_TRACE("start");
}
-
void erts_end_staging_code_ix(void)
{
beam_catches_end_staging(1);
+ erts_fun_end_staging(1);
export_end_staging(1);
module_end_staging(1);
erts_end_staging_ranges(1);
@@ -105,121 +112,234 @@ void erts_commit_staging_code_ix(void)
void erts_abort_staging_code_ix(void)
{
beam_catches_end_staging(0);
+ erts_fun_end_staging(0);
export_end_staging(0);
module_end_staging(0);
erts_end_staging_ranges(0);
CIX_TRACE("abort");
}
-static int try_seize_cwp(Process* c_p,
- void (*aux_func)(void *),
- void *aux_arg);
-
#if defined(DEBUG) || defined(ADDRESS_SANITIZER)
# define CWP_DBG_FORCE_TRAP
#endif
-/*
- * Calller _must_ yield if we return 0
- */
-int erts_try_seize_code_write_permission(Process* c_p)
-{
- ASSERT(c_p != NULL);
-
-#ifdef CWP_DBG_FORCE_TRAP
- if (!(c_p->flags & F_DBG_FORCED_TRAP)) {
- c_p->flags |= F_DBG_FORCED_TRAP;
- return 0;
- } else {
- /* back from forced trap */
- c_p->flags &= ~F_DBG_FORCED_TRAP;
- }
+#ifdef ERTS_ENABLE_LOCK_CHECK
+static int has_code_permission(struct code_permission *lock);
#endif
- return try_seize_cwp(c_p, NULL, NULL);
-}
-
-int erts_try_seize_code_write_permission_aux(void (*aux_func)(void *),
- void *aux_arg)
-{
- ASSERT(aux_func != NULL);
- return try_seize_cwp(NULL, aux_func, aux_arg);
-}
-
-static int try_seize_cwp(Process* c_p,
- void (*aux_func)(void *),
- void *aux_arg)
+static int try_seize_code_permission(struct code_permission *perm,
+ Process* c_p,
+ void (*aux_func)(void *),
+ void *aux_arg)
{
int success;
- ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
- erts_mtx_lock(&code_write_permission_mtx);
- success = !code_writing_seized;
+ ASSERT(!erts_thr_progress_is_blocking()); /* To avoid deadlock */
+
+ erts_mtx_lock(&perm->lock);
+ success = !perm->seized;
+
if (success) {
- code_writing_seized = 1;
- code_writing_process = c_p;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_tsd_set(has_code_write_permission, (void *) 1);
-#endif
- }
- else { /* Already locked */
- struct code_write_queue_item* qitem;
+ if (c_p == NULL) {
+ ASSERT(aux_func);
+ perm->scheduler = erts_get_scheduler_data();
+ }
+
+ perm->owner = c_p;
+ perm->seized = 1;
+ } else { /* Already locked */
+ struct code_permission_queue_item* qitem;
+
qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
if (c_p) {
- ASSERT(code_writing_process != c_p);
+ ERTS_LC_ASSERT(perm->owner != c_p);
+
qitem->p = c_p;
qitem->aux_func = NULL;
qitem->aux_arg = NULL;
erts_proc_inc_refc(c_p);
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
- }
- else {
+ } else {
qitem->p = NULL;
qitem->aux_func = aux_func;
qitem->aux_arg = aux_arg;
}
- qitem->next = code_write_queue;
- code_write_queue = qitem;
+
+ qitem->next = perm->queue;
+ perm->queue = qitem;
}
- erts_mtx_unlock(&code_write_permission_mtx);
- return success;
+
+ erts_mtx_unlock(&perm->lock);
+
+ return success;
}
-void erts_release_code_write_permission(void)
-{
- erts_mtx_lock(&code_write_permission_mtx);
- ERTS_LC_ASSERT(erts_has_code_write_permission());
- while (code_write_queue != NULL) { /* unleash the entire herd */
- struct code_write_queue_item* qitem = code_write_queue;
+static void release_code_permission(struct code_permission *perm) {
+ ERTS_LC_ASSERT(has_code_permission(perm));
+
+ erts_mtx_lock(&perm->lock);
+
+ /* Unleash the entire herd */
+ while (perm->queue != NULL) {
+ struct code_permission_queue_item* qitem = perm->queue;
+
if (qitem->p) {
erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+
if (!ERTS_PROC_IS_EXITING(qitem->p)) {
erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
}
+
erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
erts_proc_dec_refc(qitem->p);
- }
- else { /* aux work*/
+ } else { /* aux work */
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
erts_schedule_misc_aux_work((int) esdp->no,
qitem->aux_func,
qitem->aux_arg);
}
- code_write_queue = qitem->next;
- erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
+
+ perm->queue = qitem->next;
+ erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
+ }
+
+ perm->scheduler = NULL;
+ perm->owner = NULL;
+ perm->seized = 0;
+
+ erts_mtx_unlock(&perm->lock);
+}
+
+int erts_try_seize_code_mod_permission_aux(void (*aux_func)(void *),
+ void *aux_arg)
+{
+ ASSERT(aux_func != NULL);
+ return try_seize_code_permission(&code_mod_permission, NULL,
+ aux_func, aux_arg);
+}
+
+int erts_try_seize_code_mod_permission(Process* c_p)
+{
+ ASSERT(c_p != NULL);
+
+#ifdef CWP_DBG_FORCE_TRAP
+ if (!(c_p->flags & F_DBG_FORCED_TRAP)) {
+ c_p->flags |= F_DBG_FORCED_TRAP;
+ return 0;
+ } else {
+ /* back from forced trap */
+ c_p->flags &= ~F_DBG_FORCED_TRAP;
+ }
+#endif
+
+ return try_seize_code_permission(&code_mod_permission, c_p, NULL, NULL);
+}
+
+void erts_release_code_mod_permission(void)
+{
+ release_code_permission(&code_mod_permission);
+}
+
+int erts_try_seize_code_stage_permission(Process* c_p)
+{
+ ASSERT(c_p != NULL);
+
+#ifdef CWP_DBG_FORCE_TRAP
+ if (!(c_p->flags & F_DBG_FORCED_TRAP)) {
+ c_p->flags |= F_DBG_FORCED_TRAP;
+ return 0;
+ } else {
+ /* back from forced trap */
+ c_p->flags &= ~F_DBG_FORCED_TRAP;
+ }
+#endif
+
+ return try_seize_code_permission(&code_stage_permission, c_p, NULL, NULL);
+}
+
+void erts_release_code_stage_permission() {
+ release_code_permission(&code_stage_permission);
+}
+
+int erts_try_seize_code_load_permission(Process* c_p) {
+ ASSERT(c_p != NULL);
+
+#ifdef CWP_DBG_FORCE_TRAP
+ if (!(c_p->flags & F_DBG_FORCED_TRAP)) {
+ c_p->flags |= F_DBG_FORCED_TRAP;
+ return 0;
+ } else {
+ /* back from forced trap */
+ c_p->flags &= ~F_DBG_FORCED_TRAP;
}
- code_writing_seized = 0;
- code_writing_process = NULL;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_tsd_set(has_code_write_permission, (void *) 0);
#endif
- erts_mtx_unlock(&code_write_permission_mtx);
+
+ if (try_seize_code_permission(&code_stage_permission, c_p, NULL, NULL)) {
+ if (try_seize_code_permission(&code_mod_permission, c_p, NULL, NULL)) {
+ return 1;
+ }
+
+ erts_release_code_stage_permission();
+ }
+
+ return 0;
+}
+
+void erts_release_code_load_permission() {
+ erts_release_code_mod_permission();
+ erts_release_code_stage_permission();
}
#ifdef ERTS_ENABLE_LOCK_CHECK
-int erts_has_code_write_permission(void)
+static int has_code_permission(struct code_permission *perm)
{
- return code_writing_seized && erts_tsd_get(has_code_write_permission);
+ const ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ if (esdp && esdp->type == ERTS_SCHED_NORMAL) {
+ int res;
+
+ erts_mtx_lock(&perm->lock);
+
+ res = perm->seized;
+
+ if (esdp->current_process != NULL) {
+ /* If we're running a process, it has to match the owner of the
+ * permission. We don't care about which scheduler we are running
+ * on in order to support holding permissions when yielding (such
+ * as in code purging). */
+ res &= perm->owner == esdp->current_process;
+ } else {
+ /* If we're running an aux job, we crudely assume that this current
+ * job was started by the owner if there is one, and therefore has
+ * permission.
+ *
+ * If we don't have an owner, we assume that we have permission if
+ * we're running on the same scheduler that started the job.
+ *
+ * This is very blunt and only catches _some_ cases where we lack
+ * permission, but at least it's better than the old method of
+ * using thread-specific-data. */
+ res &= perm->owner || perm->scheduler == esdp;
+ }
+
+ erts_mtx_unlock(&perm->lock);
+
+ return res;
+ }
+
+ return 0;
+}
+
+int erts_has_code_load_permission() {
+ return erts_has_code_stage_permission() && erts_has_code_mod_permission();
+}
+
+int erts_has_code_stage_permission() {
+ return has_code_permission(&code_stage_permission);
+}
+
+int erts_has_code_mod_permission() {
+ return has_code_permission(&code_mod_permission);
}
#endif
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
index c1d128d414..a57cf7b567 100644
--- a/erts/emulator/beam/code_ix.h
+++ b/erts/emulator/beam/code_ix.h
@@ -130,37 +130,76 @@ ErtsCodeIndex erts_active_code_ix(void);
/* Return staging code ix.
* Only used by a process performing code loading/upgrading/deleting/purging.
- * Code write permission must be seized.
+ * Code staging permission must be seized.
*/
ERTS_GLB_INLINE
ErtsCodeIndex erts_staging_code_ix(void);
-/* Try seize exclusive code write permission. Needed for code staging.
+/** @brief Try to seize exclusive code loading permission. That is, both
+ * staging and modification permission.
+ *
* Main process lock (only) must be held.
* System thread progress must not be blocked.
- * Caller must not already hold the code write permission.
- * Caller is suspended and *must* yield if 0 is returned.
+ * Caller must not already have the code modification or staging permissions.
+ * Caller is suspended and *must* yield if 0 is returned. */
+int erts_try_seize_code_load_permission(struct process* c_p);
+
+/** @brief Release code loading permission. Resumes any suspended waiters. */
+void erts_release_code_load_permission(void);
+
+/** @brief Try to seize exclusive code staging permission. Needed for code
+ * loading and purging.
+ *
+ * This is kept separate from code modification permission to allow tracing and
+ * similar during long-running purge operations.
+ *
+ * * Main process lock (only) must be held.
+ * * System thread progress must not be blocked.
+ * * Caller is suspended and *must* yield if 0 is returned.
+ * * Caller must not already have the code modification or staging permissions.
+ *
+ * That is, it is _NOT_ possible to add staging permission when you already
+ * have code modification permission. The other way around is fine however.
+ */
+int erts_try_seize_code_stage_permission(struct process* c_p);
+
+/** @brief Release code stage permission. Resumes any suspended waiters. */
+void erts_release_code_stage_permission(void);
+
+/** @brief Try to seize exclusive code modification permission. Needed for
+ * tracing, breakpoints, and so on.
+ *
+ * This used to be called code_write_permission, but was renamed to break
+ * merges of code that uses the old locking paradigm.
+ *
+ * * Main process lock (only) must be held.
+ * * System thread progress must not be blocked.
+ * * Caller is suspended and *must* yield if 0 is returned.
+ * * Caller must not already have the code modification permission, but may
+ * have staging permission.
*/
-int erts_try_seize_code_write_permission(struct process* c_p);
+int erts_try_seize_code_mod_permission(struct process* c_p);
-/* Try seize exclusive code write permission for aux work.
+/** @brief As \c erts_try_seize_code_mod_permission but for aux work.
+ *
* System thread progress must not be blocked.
* On success return true.
* On failure return false and aux work func(arg) will be scheduled when
- * permission is released. .
+ * permission is released.
*/
-int erts_try_seize_code_write_permission_aux(void (*func)(void *),
- void *arg);
+int erts_try_seize_code_mod_permission_aux(void (*func)(void *),
+ void *arg);
-/* Release code write permission.
- * Will resume any suspended waiters.
- */
-void erts_release_code_write_permission(void);
+/** @brief Release code modification permission. Resumes any suspended
+ * waiters. */
+void erts_release_code_mod_permission(void);
/* Prepare the "staging area" to be a complete copy of the active code.
- * Code write permission must have been seized.
+ *
+ * Code staging permission must have been seized.
+ *
* Must be followed by calls to either "end" and "commit" or "abort" before
- * code write permission can be released.
+ * code staging permission can be released.
*/
void erts_start_staging_code_ix(int num_new);
@@ -180,7 +219,9 @@ void erts_commit_staging_code_ix(void);
void erts_abort_staging_code_ix(void);
#ifdef ERTS_ENABLE_LOCK_CHECK
-int erts_has_code_write_permission(void);
+int erts_has_code_load_permission(void);
+int erts_has_code_stage_permission(void);
+int erts_has_code_mod_permission(void);
#endif
/* module/function/arity can be NIL/NIL/-1 when the MFA is pointing to some
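As an aside (not part of the patch), the permission layering documented above can be reduced to a toy single-threaded model: staging must be taken before modification, and the load permission is simply both. The names below (try_seize_stage and friends) are invented; the real erts_try_seize_code_*_permission functions suspend and queue the caller instead of merely returning false.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool stage_seized, mod_seized;

static bool try_seize_stage(void)
{
    assert(!mod_seized);          /* staging must come before modification */
    if (stage_seized) return false;
    return stage_seized = true;
}

static bool try_seize_mod(void)
{
    if (mod_seized) return false;
    return mod_seized = true;
}

static void release_stage(void) { stage_seized = false; }
static void release_mod(void)   { mod_seized = false; }

/* Loading needs both permissions; give staging back if modification cannot
 * be seized, mirroring erts_try_seize_code_load_permission() above. */
static bool try_seize_load(void)
{
    if (try_seize_stage()) {
        if (try_seize_mod())
            return true;
        release_stage();
    }
    return false;
}

int main(void)
{
    bool ok;

    /* Purger: hold staging alone, add modification only for the final step. */
    ok = try_seize_stage();
    ok = ok && try_seize_mod();
    printf("purger holds both: %d\n", ok);
    release_mod();
    release_stage();

    /* Loader: both at once. */
    ok = try_seize_load();
    printf("loader holds both: %d\n", ok);
    release_mod();
    release_stage();
    return 0;
}

Callers that fail to seize a permission in the real emulator yield and are resumed later, as the queue handling in release_code_permission() above shows.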
diff --git a/erts/emulator/beam/emu/bif_instrs.tab b/erts/emulator/beam/emu/bif_instrs.tab
index 9f90bf6600..d1ec68168a 100644
--- a/erts/emulator/beam/emu/bif_instrs.tab
+++ b/erts/emulator/beam/emu/bif_instrs.tab
@@ -628,12 +628,12 @@ nif_bif.epilogue() {
i_load_nif() {
//| -no_next
- if (erts_try_seize_code_write_permission(c_p)) {
+ if (erts_try_seize_code_mod_permission(c_p)) {
Eterm result;
PRE_BIF_SWAPOUT(c_p);
result = erts_load_nif(c_p, I, r(0), r(1));
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
ERTS_REQ_PROC_MAIN_LOCK(c_p);
SWAPIN;
diff --git a/erts/emulator/beam/emu/emu_load.c b/erts/emulator/beam/emu/emu_load.c
index 3188325acc..db828bb691 100644
--- a/erts/emulator/beam/emu/emu_load.c
+++ b/erts/emulator/beam/emu/emu_load.c
@@ -543,6 +543,7 @@ int beam_load_finish_emit(LoaderState *stp) {
void beam_load_finalize_code(LoaderState* stp, struct erl_module_instance* inst_p)
{
+ ErtsCodeIndex staging_ix;
unsigned int i;
int on_load = stp->on_load;
unsigned catches;
@@ -555,6 +556,11 @@ void beam_load_finalize_code(LoaderState* stp, struct erl_module_instance* inst_
inst_p->code_hdr = stp->code_hdr;
inst_p->code_length = size;
+ staging_ix = erts_staging_code_ix();
+
+ ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_load_permission() ||
+ erts_thr_progress_is_blocking());
+
/* Update ranges (used for finding a function from a PC value). */
erts_update_ranges(inst_p->code_hdr, size);
@@ -626,7 +632,7 @@ void beam_load_finalize_code(LoaderState* stp, struct erl_module_instance* inst_
lambda->arity - lambda->num_free);
fun_entries[i] = fun_entry;
- if (erts_is_fun_loaded(fun_entry)) {
+ if (erts_is_fun_loaded(fun_entry, staging_ix)) {
/* We've reloaded a module over itself and inherited the old
* instance's fun entries, so we need to undo the reference
* bump in `erts_put_fun_entry2` to make fun purging work. */
@@ -634,6 +640,7 @@ void beam_load_finalize_code(LoaderState* stp, struct erl_module_instance* inst_
}
erts_set_fun_code(fun_entry,
+ staging_ix,
stp->codev + stp->labels[lambda->label].value);
}
@@ -690,7 +697,7 @@ void beam_load_finalize_code(LoaderState* stp, struct erl_module_instance* inst_
*/
ep->trampoline.not_loaded.deferred = (BeamInstr) address;
} else {
- ep->dispatch.addresses[erts_staging_code_ix()] = address;
+ ep->dispatch.addresses[staging_ix] = address;
}
}
@@ -704,7 +711,7 @@ void beam_load_finalize_code(LoaderState* stp, struct erl_module_instance* inst_
entry->name,
entry->arity);
const BeamInstr *addr =
- ep->dispatch.addresses[erts_staging_code_ix()];
+ ep->dispatch.addresses[staging_ix];
if (!ErtsInArea(addr, stp->codev, stp->ci * sizeof(BeamInstr))) {
erts_exit(ERTS_ABORT_EXIT,
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 6ae5f3ce57..c916ed678c 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3647,7 +3647,7 @@ fun_info_2(BIF_ALIST_2)
if (is_local_fun(funp)) {
fe = funp->entry.fun;
- mfa = erts_get_fun_mfa(fe);
+ mfa = erts_get_fun_mfa(fe, erts_active_code_ix());
} else {
ASSERT(is_external_fun(funp) && funp->next == NULL);
mfa = &(funp->entry.exp)->info.mfa;
@@ -3740,7 +3740,7 @@ fun_info_mfa_1(BIF_ALIST_1)
hp = HAlloc(p, 4);
if (is_local_fun(funp)) {
- mfa = erts_get_fun_mfa(funp->entry.fun);
+ mfa = erts_get_fun_mfa(funp->entry.fun, erts_active_code_ix());
if (mfa == NULL) {
/* Unloaded funs must report their module even though we can't
@@ -5000,20 +5000,20 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_ok);
}
- else if (ERTS_IS_ATOM_STR("code_write_permission", BIF_ARG_1)) {
+ else if (ERTS_IS_ATOM_STR("code_mod_permission", BIF_ARG_1)) {
/*
* Warning: This is a unsafe way of seizing the "lock"
* as there is no automatic unlock if caller terminates.
*/
switch(BIF_ARG_2) {
case am_true:
- if (!erts_try_seize_code_write_permission(BIF_P)) {
+ if (!erts_try_seize_code_mod_permission(BIF_P)) {
ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_erts_debug_set_internal_state_2),
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
BIF_RET(am_true);
case am_false:
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
BIF_RET(am_true);
}
}
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 795d491545..84be381c3a 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -47,7 +47,7 @@
const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
/*
- * The following variables are protected by code write permission.
+ * The following variables are protected by code modification permission.
*/
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
@@ -55,7 +55,7 @@ static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static ErtsTracer erts_default_meta_tracer;
-static struct { /* Protected by code write permission */
+static struct { /* Protected by code modification permission */
int current;
int install;
int local;
@@ -130,7 +130,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
ErtsTracer meta_tracer = erts_tracer_nil;
Uint freason = BADARG;
- if (!erts_try_seize_code_write_permission(p)) {
+ if (!erts_try_seize_code_mod_permission(p)) {
ERTS_BIF_YIELD3(BIF_TRAP_EXPORT(BIF_erts_internal_trace_pattern_3), p, MFA, Pattern, flaglist);
}
finish_bp.current = -1;
@@ -356,7 +356,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
ERTS_BIF_YIELD_RETURN(p, make_small(matches));
}
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
if (matches >= 0) {
return make_small(matches);
@@ -377,7 +377,7 @@ static void smp_bp_finisher(void* null)
#ifdef DEBUG
finish_bp.stager = NULL;
#endif
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(p)) {
erts_resume(p, ERTS_PROC_LOCK_STATUS);
@@ -394,8 +394,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
ErtsTracer *meta_tracer)
{
- ERTS_LC_ASSERT(erts_has_code_write_permission() ||
- erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission() ||
+ erts_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -410,8 +410,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
int erts_is_default_trace_enabled(void)
{
- ERTS_LC_ASSERT(erts_has_code_write_permission() ||
- erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission() ||
+ erts_thr_progress_is_blocking());
return erts_default_trace_pattern_is_on;
}
@@ -553,7 +553,7 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
BIF_ERROR(p, BADARG | EXF_HAS_EXT_INFO);
}
- if (!erts_try_seize_code_write_permission(BIF_P)) {
+ if (!erts_try_seize_code_mod_permission(BIF_P)) {
ERTS_TRACER_CLEAR(&tracer);
ERTS_BIF_YIELD3(BIF_TRAP_EXPORT(BIF_erts_internal_trace_3),
BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
@@ -771,7 +771,7 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
erts_thr_progress_unblock();
erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
ERTS_TRACER_CLEAR(&tracer);
BIF_RET(make_small(matches));
@@ -788,7 +788,7 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
erts_thr_progress_unblock();
erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
BIF_ERROR(p, BADARG);
}
@@ -804,7 +804,7 @@ Eterm trace_info_2(BIF_ALIST_2)
Eterm Key = BIF_ARG_2;
Eterm res;
- if (!erts_try_seize_code_write_permission(p)) {
+ if (!erts_try_seize_code_mod_permission(p)) {
ERTS_BIF_YIELD2(BIF_TRAP_EXPORT(BIF_trace_info_2), p, What, Key);
}
@@ -818,10 +818,10 @@ Eterm trace_info_2(BIF_ALIST_2)
res = trace_info_func(p, What, Key);
} else {
p->fvalue = am_badopt;
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
BIF_ERROR(p, BADARG | EXF_HAS_EXT_INFO);
}
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
if (is_value(res) && is_internal_ref(res))
BIF_TRAP1(erts_await_result, BIF_P, res);
@@ -1583,7 +1583,7 @@ consolidate_event_tracing(ErtsTracingEvent te[])
int
erts_finish_breakpointing(void)
{
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
/*
* Memory barriers will be issued for all schedulers *before*
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 17b3e12dd8..ae6116ec19 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -29,6 +29,12 @@
#include "hash.h"
#include "beam_common.h"
+#ifdef DEBUG
+# define IF_DEBUG(x) x
+#else
+# define IF_DEBUG(x)
+#endif
+
/* Container structure for fun entries, allowing us to start `ErlFunEntry` with
* a field other than its `HashBucket`. */
typedef struct erl_fun_entry_container {
@@ -78,22 +84,33 @@ void
erts_fun_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
- if (lock)
- erts_fun_read_lock();
+
+ if (lock) {
+ erts_fun_read_lock();
+ }
+
hash_info(to, to_arg, &erts_fun_table);
- if (lock)
- erts_fun_read_unlock();
+
+ if (lock) {
+ erts_fun_read_unlock();
+ }
}
int erts_fun_table_sz(void)
{
int sz;
int lock = !ERTS_IS_CRASH_DUMPING;
- if (lock)
- erts_fun_read_lock();
+
+ if (lock) {
+ erts_fun_read_lock();
+ }
+
sz = hash_table_sz(&erts_fun_table);
- if (lock)
- erts_fun_read_unlock();
+
+ if (lock) {
+ erts_fun_read_unlock();
+ }
+
return sz;
}
@@ -130,8 +147,8 @@ erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
return &fc->entry;
}
-const ErtsCodeMFA *erts_get_fun_mfa(const ErlFunEntry *fe) {
- ErtsCodePtr address = fe->dispatch.addresses[0];
+const ErtsCodeMFA *erts_get_fun_mfa(const ErlFunEntry *fe, ErtsCodeIndex ix) {
+ ErtsCodePtr address = fe->dispatch.addresses[ix];
if (address != beam_unloaded_fun) {
return erts_find_function_from_pc(address);
@@ -140,16 +157,14 @@ const ErtsCodeMFA *erts_get_fun_mfa(const ErlFunEntry *fe) {
return NULL;
}
-void erts_set_fun_code(ErlFunEntry *fe, ErtsCodePtr address) {
- int i;
-
- for (i = 0; i < ERTS_ADDRESSV_SIZE; i++) {
- fe->dispatch.addresses[i] = address;
- }
+void erts_set_fun_code(ErlFunEntry *fe, ErtsCodeIndex ix, ErtsCodePtr address) {
+ /* Fun entries MUST NOT be updated during a purge! */
+ ASSERT(fe->pend_purge_address == NULL);
+ fe->dispatch.addresses[ix] = address;
}
-int erts_is_fun_loaded(const ErlFunEntry* fe) {
- return fe->dispatch.addresses[0] != beam_unloaded_fun;
+int erts_is_fun_loaded(const ErlFunEntry* fe, ErtsCodeIndex ix) {
+ return fe->dispatch.addresses[ix] != beam_unloaded_fun;
}
static void
@@ -165,38 +180,49 @@ void
erts_erase_fun_entry(ErlFunEntry* fe)
{
erts_fun_write_lock();
- /*
- * We have to check refc again since someone might have looked up
- * the fun entry and incremented refc after last check.
- */
- if (erts_refc_dectest(&fe->refc, -1) <= 0)
- {
- if (erts_is_fun_loaded(fe)) {
+
+ /* We have to check refc again since someone might have looked up
+ * the fun entry and incremented refc after last check. */
+ if (erts_refc_dectest(&fe->refc, -1) <= 0) {
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+
+ if (erts_is_fun_loaded(fe, code_ix)) {
erts_exit(ERTS_ERROR_EXIT,
"Internal error: "
"Invalid reference count found on #Fun<%T.%d.%d>: "
" About to erase fun still referred by code.\n",
fe->module, fe->old_index, fe->old_uniq);
}
+
erts_erase_fun_entry_unlocked(fe);
}
+
erts_fun_write_unlock();
}
+struct fun_prepare_purge_args {
+ struct erl_module_instance* modp;
+ ErtsCodeIndex code_ix;
+};
+
static void fun_purge_foreach(ErlFunEntryContainer *fc,
- struct erl_module_instance* modp)
+ struct fun_prepare_purge_args *args)
{
- const char *fun_addr, *mod_start;
+ struct erl_module_instance* modp = args->modp;
ErlFunEntry *fe = &fc->entry;
+ const char *mod_start;
+ ErtsCodePtr fun_addr;
- fun_addr = (const char*)fe->dispatch.addresses[0];
+ fun_addr = fe->dispatch.addresses[args->code_ix];
mod_start = (const char*)modp->code_hdr;
- if (ErtsInArea(fun_addr, mod_start, modp->code_length)) {
- fe->pend_purge_address = fe->dispatch.addresses[0];
+ if (ErtsInArea((const char*)fun_addr, mod_start, modp->code_length)) {
+ ASSERT(fe->pend_purge_address == NULL);
+
+ fe->pend_purge_address = fun_addr;
ERTS_THR_WRITE_MEMORY_BARRIER;
- erts_set_fun_code(fe, beam_unloaded_fun);
+ fe->dispatch.addresses[args->code_ix] = beam_unloaded_fun;
erts_purge_state_add_fun(fe);
}
@@ -205,46 +231,68 @@ static void fun_purge_foreach(ErlFunEntryContainer *fc,
void
erts_fun_purge_prepare(struct erl_module_instance* modp)
{
- erts_fun_read_lock();
- hash_foreach(&erts_fun_table, (HFOREACH_FUN)fun_purge_foreach, modp);
- erts_fun_read_unlock();
+ struct fun_prepare_purge_args args = {modp, erts_active_code_ix()};
+
+ ERTS_LC_ASSERT(erts_has_code_stage_permission());
+
+ erts_fun_write_lock();
+ hash_foreach(&erts_fun_table, (HFOREACH_FUN)fun_purge_foreach, &args);
+ erts_fun_write_unlock();
}
void
erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no)
{
- Uint ix;
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ Uint fun_ix;
- for (ix = 0; ix < no; ix++) {
- ErlFunEntry *fe = funs[ix];
+ ERTS_LC_ASSERT(erts_has_code_stage_permission());
- if (fe->dispatch.addresses[0] == beam_unloaded_fun) {
- erts_set_fun_code(fe, fe->pend_purge_address);
- }
+ for (fun_ix = 0; fun_ix < no; fun_ix++) {
+ ErlFunEntry *fe = funs[fun_ix];
+
+ ASSERT(fe->dispatch.addresses[code_ix] == beam_unloaded_fun);
+ fe->dispatch.addresses[code_ix] = fe->pend_purge_address;
}
}
void
erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no)
{
- Uint ix;
+ IF_DEBUG(ErtsCodeIndex code_ix = erts_active_code_ix();)
+ Uint fun_ix;
- for (ix = 0; ix < no; ix++) {
- funs[ix]->pend_purge_address = NULL;
+ ERTS_LC_ASSERT(erts_has_code_stage_permission());
+
+ for (fun_ix = 0; fun_ix < no; fun_ix++) {
+ ErlFunEntry *fe = funs[fun_ix];
+
+ /* The abort_prepare step should have set the active address to the
+ * actual one. */
+ ASSERT(fe->dispatch.addresses[code_ix] != beam_unloaded_fun);
+ fe->pend_purge_address = NULL;
}
}
void
erts_fun_purge_complete(ErlFunEntry **funs, Uint no)
{
+ IF_DEBUG(ErtsCodeIndex code_ix = erts_active_code_ix();)
Uint ix;
+ ERTS_LC_ASSERT(erts_has_code_stage_permission());
+
for (ix = 0; ix < no; ix++) {
- ErlFunEntry *fe = funs[ix];
- fe->pend_purge_address = NULL;
- if (erts_refc_dectest(&fe->refc, 0) == 0)
- erts_erase_fun_entry(fe);
+ ErlFunEntry *fe = funs[ix];
+
+ ASSERT(fe->dispatch.addresses[code_ix] == beam_unloaded_fun);
+ fe->pend_purge_address = NULL;
+
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ erts_erase_fun_entry(fe);
+ }
}
+
ERTS_THR_WRITE_MEMORY_BARRIER;
}
@@ -292,14 +340,11 @@ ErlFunThing *erts_new_local_fun_thing(Process *p, ErlFunEntry *fe,
#ifdef DEBUG
{
- /* FIXME: This assertion can fail because it may point to new code that
- * has not been committed yet. This is an actual bug but the fix is too
- * too involved and risky to release in a patch.
- *
- * As this problem has existed since the introduction of funs and is
- * very unlikely to cause actual issues in the wild, we've decided to
- * postpone the fix until OTP 26. See OTP-18016 for details. */
- const ErtsCodeMFA *mfa = erts_get_fun_mfa(fe);
+ /* Note that `mfa` may be NULL if the fun is currently being purged. We
+ * ignore this since it's not an error and we only need `mfa` to
+ * sanity-check the arity at this point. If the fun is called while in
+ * this state, the `error_handler` module will take care of it. */
+ const ErtsCodeMFA *mfa = erts_get_fun_mfa(fe, erts_active_code_ix());
ASSERT(!mfa || funp->arity == mfa->arity - num_free);
ASSERT(arity == fe->arity);
}
@@ -312,6 +357,7 @@ ErlFunThing *erts_new_local_fun_thing(Process *p, ErlFunEntry *fe,
struct dump_fun_foreach_args {
fmtfn_t to;
void *to_arg;
+ ErtsCodeIndex code_ix;
};
static void
@@ -323,21 +369,27 @@ dump_fun_foreach(ErlFunEntryContainer *fc, struct dump_fun_foreach_args *args)
erts_print(args->to, args->to_arg, "Module: %T\n", fe->module);
erts_print(args->to, args->to_arg, "Uniq: %d\n", fe->old_uniq);
erts_print(args->to, args->to_arg, "Index: %d\n",fe->old_index);
- erts_print(args->to, args->to_arg, "Address: %p\n", fe->dispatch.addresses[0]);
- erts_print(args->to, args->to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1));
+ erts_print(args->to, args->to_arg, "Address: %p\n",
+ fe->dispatch.addresses[args->code_ix]);
+ erts_print(args->to, args->to_arg, "Refc: %ld\n",
+ erts_refc_read(&fe->refc, 1));
}
void
erts_dump_fun_entries(fmtfn_t to, void *to_arg)
{
- struct dump_fun_foreach_args args = {to, to_arg};
+ struct dump_fun_foreach_args args = {to, to_arg, erts_active_code_ix()};
int lock = !ERTS_IS_CRASH_DUMPING;
- if (lock)
- erts_fun_read_lock();
+ if (lock) {
+ erts_fun_read_lock();
+ }
+
hash_foreach(&erts_fun_table, (HFOREACH_FUN)dump_fun_foreach, &args);
- if (lock)
- erts_fun_read_unlock();
+
+ if (lock) {
+ erts_fun_read_unlock();
+ }
}
static HashValue
@@ -365,7 +417,9 @@ fun_cmp(ErlFunEntryContainer* obj1, ErlFunEntryContainer* obj2)
static ErlFunEntryContainer*
fun_alloc(ErlFunEntryContainer* template)
{
- ErlFunEntryContainer* obj;
+ ErlFunEntryContainer *obj;
+ ErtsDispatchable *disp;
+ ErtsCodeIndex ix;
obj = (ErlFunEntryContainer *) erts_alloc(ERTS_ALC_T_FUN_ENTRY,
sizeof(ErlFunEntryContainer));
@@ -374,7 +428,14 @@ fun_alloc(ErlFunEntryContainer* template)
erts_refc_init(&obj->entry.refc, -1);
- erts_set_fun_code(&obj->entry, beam_unloaded_fun);
+ disp = &obj->entry.dispatch;
+ for (ix = 0; ix < ERTS_NUM_CODE_IX; ix++) {
+ disp->addresses[ix] = beam_unloaded_fun;
+ }
+
+#ifdef BEAMASM
+ disp->addresses[ERTS_SAVE_CALLS_CODE_IX] = beam_save_calls_fun;
+#endif
obj->entry.pend_purge_address = NULL;
@@ -386,3 +447,46 @@ fun_free(ErlFunEntryContainer* obj)
{
erts_free(ERTS_ALC_T_FUN_ENTRY, (void *) obj);
}
+
+struct fun_stage_args {
+ ErtsCodeIndex src_ix;
+ ErtsCodeIndex dst_ix;
+};
+
+static void fun_stage_foreach(ErlFunEntryContainer *fc,
+ struct fun_stage_args *args)
+{
+ ErtsDispatchable *disp = &fc->entry.dispatch;
+
+ /* Fun entries MUST NOT be updated during a purge! */
+ ASSERT(fc->entry.pend_purge_address == NULL);
+
+ disp->addresses[args->dst_ix] = disp->addresses[args->src_ix];
+}
+
+IF_DEBUG(static ErtsCodeIndex debug_fun_load_ix = 0;)
+
+void erts_fun_start_staging(void)
+{
+ ErtsCodeIndex dst_ix = erts_staging_code_ix();
+ ErtsCodeIndex src_ix = erts_active_code_ix();
+ struct fun_stage_args args = {src_ix, dst_ix};
+
+ ERTS_LC_ASSERT(erts_has_code_stage_permission());
+ ASSERT(dst_ix != src_ix);
+ ASSERT(debug_fun_load_ix == ~0);
+
+ erts_fun_write_lock();
+ hash_foreach(&erts_fun_table, (HFOREACH_FUN)fun_stage_foreach, &args);
+ erts_fun_write_unlock();
+
+ IF_DEBUG(debug_fun_load_ix = dst_ix);
+}
+
+void erts_fun_end_staging(int commit)
+{
+ ERTS_LC_ASSERT((erts_active_code_ix() == erts_active_code_ix()) ||
+ erts_has_code_stage_permission());
+ ASSERT(debug_fun_load_ix == erts_staging_code_ix());
+ IF_DEBUG(debug_fun_load_ix = ~0);
+}
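Again purely as a reading aid (not part of the patch), the pending-purge marker protocol implemented by erts_fun_purge_prepare(), erts_fun_purge_abort_prepare(), erts_fun_purge_abort_finalize() and erts_fun_purge_complete() can be sketched as follows. The names and single-address layout are invented; the real code operates on the active code index's dispatch slot and interleaves memory barriers and thread-progress waits between the two abort steps.

#include <stddef.h>
#include <stdio.h>

typedef void (*code_ptr)(void);

static void module_code(void)   { puts("fun body in the module being purged"); }
static void unloaded_stub(void) { puts("beam_unloaded_fun: caller is suspended"); }

struct fun_entry {
    code_ptr active;          /* address callers dispatch through */
    code_ptr pend_purge;      /* saved address while a purge is pending */
};

static void purge_prepare(struct fun_entry *fe)
{
    fe->pend_purge = fe->active;   /* remember the real code... */
    fe->active = unloaded_stub;    /* ...and trap callers meanwhile */
}

static void purge_abort_prepare(struct fun_entry *fe)
{
    fe->active = fe->pend_purge;   /* step 1: restore the real address */
}

static void purge_abort_finalize(struct fun_entry *fe)
{
    fe->pend_purge = NULL;         /* step 2: clear the marker */
}

static void purge_complete(struct fun_entry *fe)
{
    fe->pend_purge = NULL;         /* the old code is gone for good */
}

int main(void)
{
    struct fun_entry fe = {module_code, NULL};

    /* Soft purge finds live references: abort in two steps (with a
     * thread-progress wait between them in the real emulator). */
    purge_prepare(&fe);
    fe.active();                   /* caller hits the stub and suspends */
    purge_abort_prepare(&fe);
    purge_abort_finalize(&fe);
    fe.active();                   /* back to the real code */

    /* Second attempt succeeds: the entry keeps pointing at the stub until
     * new code for the module is loaded. */
    purge_prepare(&fe);
    purge_complete(&fe);
    fe.active();
    return 0;
}

Splitting the abort into a prepare and a finalize step, with a thread-progress wait in between, is what prevents stale suspended processes, as the comment in beam_bif_load.c above explains.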
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 4276e9a999..43f3ec3b22 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -97,14 +97,14 @@ int erts_fun_table_sz(void);
ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
const byte* uniq, int index, int arity);
-const ErtsCodeMFA *erts_get_fun_mfa(const ErlFunEntry *fe);
+const ErtsCodeMFA *erts_get_fun_mfa(const ErlFunEntry *fe, ErtsCodeIndex ix);
-void erts_set_fun_code(ErlFunEntry *fe, ErtsCodePtr address);
+void erts_set_fun_code(ErlFunEntry *fe, ErtsCodeIndex ix, ErtsCodePtr address);
ERTS_GLB_INLINE
ErtsCodePtr erts_get_fun_code(ErlFunEntry *fe, ErtsCodeIndex ix);
-int erts_is_fun_loaded(const ErlFunEntry* fe);
+int erts_is_fun_loaded(const ErlFunEntry* fe, ErtsCodeIndex ix);
void erts_erase_fun_entry(ErlFunEntry* fe);
void erts_cleanup_funs(ErlFunThing* funp);
@@ -116,6 +116,10 @@ void erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no);
void erts_fun_purge_complete(ErlFunEntry **funs, Uint no);
void erts_dump_fun_entries(fmtfn_t, void *);
+
+void erts_fun_start_staging(void);
+void erts_fun_end_staging(int commit);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index f299889dcc..be1c586ca1 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -97,7 +97,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_links", "address" },
{ "update_persistent_term_permission", NULL },
{ "persistent_term_delete_permission", NULL },
- { "code_write_permission", NULL },
+ { "code_stage_permission", NULL },
+ { "code_mod_permission", NULL },
{ "purge_state", NULL },
{ "proc_status", "pid" },
{ "proc_trace", "pid" },
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index fb4ffc003f..da448ec925 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -2431,7 +2431,7 @@ int enif_vsnprintf(char* buffer, size_t size, const char *format, va_list ap)
/*
* Sentinel node in circular list of all resource types.
- * List protected by code_write_permission.
+ * List protected by code modification permission.
*/
struct enif_resource_type_t resource_type_list;
@@ -2579,7 +2579,7 @@ ErlNifResourceType* open_resource_type(ErlNifEnv* env,
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -4371,7 +4371,7 @@ void erts_add_taint(Eterm mod_atom)
struct tainted_module_t *first, *t;
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)
- || erts_has_code_write_permission());
+ || erts_has_code_mod_permission());
first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
for (t=first ; t; t=t->next) {
@@ -4944,7 +4944,7 @@ static void patch_call_nif_early(ErlNifEntry* entry,
{
int i;
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_nif_call_tab_lock));
for (i=0; i < entry->num_of_funcs; i++)
@@ -5074,10 +5074,11 @@ static void load_nif_2nd_finisher(void* vlib)
int i;
/*
- * We seize code write permission only to avoid any trace breakpoints
- * to change while we patch the op_call_nif_WWW instruction.
+ * We seize code modification permission only to avoid any trace
+ * breakpoints to change while we patch the op_call_nif_WWW instruction.
*/
- if (!erts_try_seize_code_write_permission_aux(load_nif_2nd_finisher, vlib)) {
+ if (!erts_try_seize_code_mod_permission_aux(load_nif_2nd_finisher,
+ vlib)) {
return;
}
@@ -5120,7 +5121,7 @@ static void load_nif_2nd_finisher(void* vlib)
}
erts_mtx_unlock(&lib->load_mtx);
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
if (fin) {
UWord bytes = sizeof_ErtsNifFinish(lib->entry.num_of_funcs);
@@ -5204,7 +5205,7 @@ erts_unload_nif(struct erl_module_nif* lib)
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
erts_tracer_nif_clear();
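`load_nif_2nd_finisher` above uses the `_aux` variant of the seize call, which cannot suspend a process: if the permission is busy, the function is re-run once the current holder releases it. The sketch below models that "seize now, or defer a retry callback" shape as a simplified stand-alone program; none of the names are real ERTS APIs.

```c
/* Sketch of a "seize or defer" permission pattern, loosely modelled on the
 * erts_try_seize_code_mod_permission_aux() call above. Everything here is a
 * simplified stand-in; no real ERTS scheduling is involved. */
#include <stdbool.h>
#include <stdio.h>

typedef void (*retry_cb)(void *arg);

static bool perm_taken;
static retry_cb pending_cb;
static void *pending_arg;

/* Take the permission, or remember a callback to run when it is released. */
static bool try_seize_permission_aux(retry_cb cb, void *arg)
{
    if (perm_taken) {
        pending_cb = cb;
        pending_arg = arg;
        return false;
    }
    perm_taken = true;
    return true;
}

static void release_permission(void)
{
    perm_taken = false;
    if (pending_cb) {
        retry_cb cb = pending_cb;
        pending_cb = NULL;
        cb(pending_arg); /* retry the deferred work */
    }
}

static void finisher(void *arg)
{
    if (!try_seize_permission_aux(finisher, arg)) {
        return; /* will be called again on release */
    }
    printf("finishing %s\n", (const char *)arg);
    release_permission();
}

int main(void)
{
    perm_taken = true;       /* someone else holds the permission */
    finisher((void *)"lib"); /* gets deferred... */
    release_permission();    /* ...and runs here */
    return 0;
}
```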
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index c9e02c064c..b1fa82dfcb 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -293,7 +293,7 @@ erts_export_put(Eterm mod, Eterm func, unsigned int arity)
res = ee->ep;
#ifdef BEAMASM
- res->dispatch.addresses[ERTS_SAVE_CALLS_CODE_IX] = beam_save_calls;
+ res->dispatch.addresses[ERTS_SAVE_CALLS_CODE_IX] = beam_save_calls_export;
#endif
return res;
@@ -337,7 +337,7 @@ erts_export_get_or_make_stub(Eterm mod, Eterm func, unsigned int arity)
#ifdef BEAMASM
ep->dispatch.addresses[ERTS_SAVE_CALLS_CODE_IX] =
- beam_save_calls;
+ beam_save_calls_export;
#endif
ASSERT(ep);
@@ -387,7 +387,7 @@ Export *export_get(Export *e)
return entry ? entry->ep : NULL;
}
-IF_DEBUG(static ErtsCodeIndex debug_start_load_ix = 0;)
+IF_DEBUG(static ErtsCodeIndex debug_export_load_ix = 0;)
void export_start_staging(void)
@@ -399,7 +399,7 @@ void export_start_staging(void)
int i;
ASSERT(dst_ix != src_ix);
- ASSERT(debug_start_load_ix == -1);
+ ASSERT(debug_export_load_ix == ~0);
export_staging_lock();
/*
@@ -426,12 +426,12 @@ void export_start_staging(void)
}
export_staging_unlock();
- IF_DEBUG(debug_start_load_ix = dst_ix);
+ IF_DEBUG(debug_export_load_ix = dst_ix);
}
void export_end_staging(int commit)
{
- ASSERT(debug_start_load_ix == erts_staging_code_ix());
- IF_DEBUG(debug_start_load_ix = -1);
+ ASSERT(debug_export_load_ix == erts_staging_code_ix());
+ IF_DEBUG(debug_export_load_ix = ~0);
}
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 699a62a134..d2d61d2541 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -122,7 +122,8 @@ extern void erts_add_taint(Eterm mod_atom);
extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(fmtfn_t to, void* to_arg);
-/* Loads the specified NIF. The caller must have code write permission. */
+/* Loads the specified NIF. The caller must have code modification
+ * permission. */
Eterm erts_load_nif(Process *c_p, ErtsCodePtr I, Eterm filename, Eterm args);
void erts_unload_nif(struct erl_module_nif* nif);
diff --git a/erts/emulator/beam/jit/arm/beam_asm_global.hpp.pl b/erts/emulator/beam/jit/arm/beam_asm_global.hpp.pl
index 893830d180..7b9e5b4d95 100644
--- a/erts/emulator/beam/jit/arm/beam_asm_global.hpp.pl
+++ b/erts/emulator/beam/jit/arm/beam_asm_global.hpp.pl
@@ -54,7 +54,8 @@ my @beam_global_funcs = qw(
dispatch_bif
dispatch_nif
dispatch_return
- dispatch_save_calls
+ dispatch_save_calls_export
+ dispatch_save_calls_fun
export_trampoline
fconv_shared
get_sint64_shared
diff --git a/erts/emulator/beam/jit/arm/instr_call.cpp b/erts/emulator/beam/jit/arm/instr_call.cpp
index 58bbd45a42..2435ef6b22 100644
--- a/erts/emulator/beam/jit/arm/instr_call.cpp
+++ b/erts/emulator/beam/jit/arm/instr_call.cpp
@@ -64,12 +64,12 @@ void BeamModuleAssembler::emit_i_call_only(const ArgLabel &CallTarget) {
a.b(resolve_beam_label(CallTarget, disp128MB));
}
-/* Handles save_calls. When the active code index is ERTS_SAVE_CALLS_CODE_IX,
- * all remote calls will land here.
+/* Handles save_calls for remote calls. When the active code index is
+ * ERTS_SAVE_CALLS_CODE_IX, all remote calls will land here.
*
* Export entry is in ARG1, return address is in LR (x30). Both of these must
* be preserved since this runs between caller and callee. */
-void BeamGlobalAssembler::emit_dispatch_save_calls() {
+void BeamGlobalAssembler::emit_dispatch_save_calls_export() {
a.str(ARG1, TMP_MEM1q);
emit_enter_runtime_frame();
diff --git a/erts/emulator/beam/jit/arm/instr_fun.cpp b/erts/emulator/beam/jit/arm/instr_fun.cpp
index c236281ace..209a900c97 100644
--- a/erts/emulator/beam/jit/arm/instr_fun.cpp
+++ b/erts/emulator/beam/jit/arm/instr_fun.cpp
@@ -135,6 +135,19 @@ void BeamGlobalAssembler::emit_handle_call_fun_error() {
}
}
+/* Handles save_calls for local funs, which is a side-effect of our calling
+ * convention. Fun entry is in ARG1.
+ *
+ * When the active code index is ERTS_SAVE_CALLS_CODE_IX, all local fun calls
+ * will land here. */
+void BeamGlobalAssembler::emit_dispatch_save_calls_fun() {
+ /* Keep going with the actual code index. */
+ a.mov(TMP1, imm(&the_active_code_index));
+ a.ldr(TMP1.w(), arm::Mem(TMP1));
+
+ branch(emit_setup_dispatchable_call(ARG1, TMP1));
+}
+
/* `call_fun` instructions land here to set up their environment before jumping
* to the actual implementation.
*
diff --git a/erts/emulator/beam/jit/asm_load.c b/erts/emulator/beam/jit/asm_load.c
index 9511286f42..2172307ece 100644
--- a/erts/emulator/beam/jit/asm_load.c
+++ b/erts/emulator/beam/jit/asm_load.c
@@ -871,7 +871,11 @@ load_error:
void beam_load_finalize_code(LoaderState *stp,
struct erl_module_instance *inst_p) {
- int staging_ix, code_size, i;
+ ErtsCodeIndex staging_ix;
+ int code_size, i;
+
+ ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_load_permission() ||
+ erts_thr_progress_is_blocking());
code_size = beamasm_get_header(stp->ba, &stp->code_hdr);
erts_total_code_size += code_size;
@@ -948,14 +952,16 @@ void beam_load_finalize_code(LoaderState *stp,
lambda->index,
lambda->arity - lambda->num_free);
- if (erts_is_fun_loaded(fun_entry)) {
+ if (erts_is_fun_loaded(fun_entry, staging_ix)) {
/* We've reloaded a module over itself and inherited the old
* instance's fun entries, so we need to undo the reference
* bump in `erts_put_fun_entry2` to make fun purging work. */
erts_refc_dectest(&fun_entry->refc, 1);
}
- erts_set_fun_code(fun_entry, beamasm_get_lambda(stp->ba, i));
+ erts_set_fun_code(fun_entry,
+ staging_ix,
+ beamasm_get_lambda(stp->ba, i));
beamasm_patch_lambda(stp->ba,
stp->native_module_rw,
diff --git a/erts/emulator/beam/jit/beam_jit_common.cpp b/erts/emulator/beam/jit/beam_jit_common.cpp
index ec874d0072..ab90d1d242 100644
--- a/erts/emulator/beam/jit/beam_jit_common.cpp
+++ b/erts/emulator/beam/jit/beam_jit_common.cpp
@@ -595,13 +595,13 @@ Eterm beam_jit_call_nif(Process *c_p,
enum beam_jit_nif_load_ret beam_jit_load_nif(Process *c_p,
ErtsCodePtr I,
Eterm *reg) {
- if (erts_try_seize_code_write_permission(c_p)) {
+ if (erts_try_seize_code_mod_permission(c_p)) {
Eterm result;
PROCESS_MAIN_CHK_LOCKS((c_p));
ERTS_UNREQ_PROC_MAIN_LOCK((c_p));
result = erts_load_nif(c_p, I, reg[0], reg[1]);
- erts_release_code_write_permission();
+ erts_release_code_mod_permission();
ERTS_REQ_PROC_MAIN_LOCK(c_p);
if (ERTS_LIKELY(is_value(result))) {
diff --git a/erts/emulator/beam/jit/beam_jit_main.cpp b/erts/emulator/beam/jit/beam_jit_main.cpp
index 84c396b3fe..ec883275da 100644
--- a/erts/emulator/beam/jit/beam_jit_main.cpp
+++ b/erts/emulator/beam/jit/beam_jit_main.cpp
@@ -55,7 +55,8 @@ ErtsCodePtr beam_exit;
ErtsCodePtr beam_export_trampoline;
ErtsCodePtr beam_bif_export_trap;
ErtsCodePtr beam_continue_exit;
-ErtsCodePtr beam_save_calls;
+ErtsCodePtr beam_save_calls_export;
+ErtsCodePtr beam_save_calls_fun;
ErtsCodePtr beam_unloaded_fun;
/* NOTE These should be the only variables containing trace instructions.
@@ -87,7 +88,7 @@ static void install_bifs(void) {
int i;
ASSERT(beam_export_trampoline != NULL);
- ASSERT(beam_save_calls != NULL);
+ ASSERT(beam_save_calls_export != NULL);
for (i = 0; i < BIF_SIZE; i++) {
BifEntry *entry;
@@ -336,7 +337,8 @@ void beamasm_init() {
/* These instructions rely on register contents, and can only be reached
* from a `call_ext_*`-instruction or trapping from the emulator, hence the
* lack of wrapper functions. */
- beam_save_calls = (ErtsCodePtr)bga->get_dispatch_save_calls();
+ beam_save_calls_export = (ErtsCodePtr)bga->get_dispatch_save_calls_export();
+ beam_save_calls_fun = (ErtsCodePtr)bga->get_dispatch_save_calls_fun();
beam_export_trampoline = (ErtsCodePtr)bga->get_export_trampoline();
    /* Used when trapping to Erlang code from the emulator, setting up
diff --git a/erts/emulator/beam/jit/x86/beam_asm_global.hpp.pl b/erts/emulator/beam/jit/x86/beam_asm_global.hpp.pl
index 61978275ef..8a658877d9 100755
--- a/erts/emulator/beam/jit/x86/beam_asm_global.hpp.pl
+++ b/erts/emulator/beam/jit/x86/beam_asm_global.hpp.pl
@@ -43,7 +43,8 @@ my @beam_global_funcs = qw(
dispatch_bif
dispatch_nif
dispatch_return
- dispatch_save_calls
+ dispatch_save_calls_export
+ dispatch_save_calls_fun
export_trampoline
garbage_collect
generic_bp_global
diff --git a/erts/emulator/beam/jit/x86/instr_call.cpp b/erts/emulator/beam/jit/x86/instr_call.cpp
index f2d9583533..bde7fe88f7 100644
--- a/erts/emulator/beam/jit/x86/instr_call.cpp
+++ b/erts/emulator/beam/jit/x86/instr_call.cpp
@@ -90,11 +90,11 @@ void BeamModuleAssembler::emit_i_call_only(const ArgLabel &CallDest) {
a.jmp(resolve_beam_label(CallDest));
}
-/* Handles save_calls. Export entry is in RET.
+/* Handles save_calls for export entries. Export entry is in RET.
*
* When the active code index is ERTS_SAVE_CALLS_CODE_IX, all remote calls will
* land here. */
-void BeamGlobalAssembler::emit_dispatch_save_calls() {
+void BeamGlobalAssembler::emit_dispatch_save_calls_export() {
a.mov(TMP_MEM1q, RET);
emit_enter_runtime();
diff --git a/erts/emulator/beam/jit/x86/instr_fun.cpp b/erts/emulator/beam/jit/x86/instr_fun.cpp
index f51c81f4c9..13d47b476e 100644
--- a/erts/emulator/beam/jit/x86/instr_fun.cpp
+++ b/erts/emulator/beam/jit/x86/instr_fun.cpp
@@ -143,6 +143,19 @@ void BeamGlobalAssembler::emit_handle_call_fun_error() {
}
}
+/* Handles save_calls for local funs, which is a side-effect of our calling
+ * convention. Fun entry is in RET.
+ *
+ * When the active code index is ERTS_SAVE_CALLS_CODE_IX, all local fun calls
+ * will land here. */
+void BeamGlobalAssembler::emit_dispatch_save_calls_fun() {
+ /* Keep going with the actual code index. */
+ a.mov(ARG1, imm(&the_active_code_index));
+ a.mov(ARG1d, x86::dword_ptr(ARG1));
+
+ a.jmp(emit_setup_dispatchable_call(RET, ARG1));
+}
+
/* `call_fun` instructions land here to set up their environment before jumping
* to the actual implementation.
*
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index ef16a6cf54..ed3751e74d 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -162,8 +162,7 @@ static Module* put_module(Eterm mod, IndexTable* mod_tab)
Module*
erts_put_module(Eterm mod)
{
- ERTS_LC_ASSERT(erts_initialized == 0
- || erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_load_permission());
return put_module(mod, &module_tables[erts_staging_code_ix()]);
}
@@ -206,7 +205,7 @@ void *erts_writable_code_ptr(struct erl_module_instance* modi,
const void *ptr) {
const char *code_start, *code_end, *ptr_raw;
- ERTS_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_mod_permission());
code_start = (char*)modi->code_hdr;
code_end = code_start + modi->code_length;
diff --git a/erts/emulator/internal_doc/CodeLoading.md b/erts/emulator/internal_doc/CodeLoading.md
index fa5bba0643..83d3d8048b 100644
--- a/erts/emulator/internal_doc/CodeLoading.md
+++ b/erts/emulator/internal_doc/CodeLoading.md
@@ -41,8 +41,8 @@ only be done by one loader process at a time. A second loader process
trying to enter finishing phase will be suspended until the first
loader is done. This will only block the process, the scheduler is
free to schedule other work while the second loader is waiting. (See
-`erts_try_seize_code_write_permission` and
-`erts_release_code_write_permission`).
+`erts_try_seize_code_load_permission` and
+`erts_release_code_load_permission`).
The ability to prepare several modules in parallel is not currently
used as almost all code loading is serialized by the code\_server
@@ -101,7 +101,7 @@ result of a half loaded module.
The finishing phase is carried out in the following sequence by the
BIF `erlang:finish_loading`:
-1. Seize exclusive code write permission (suspend process if needed
+1. Seize exclusive code load permission (suspend process if needed
until we get it).
2. Make a full copy of all the active access structures. This copy is
@@ -119,7 +119,7 @@ BIF `erlang:finish_loading`:
6. After thread progress, commit the staging area by assigning
`the_staging_code_index` to `the_active_code_index`.
-7. Release the code write permission allowing other processes to stage
+7. Release the code load permission allowing other processes to stage
new code.
8. Resume the loader process allowing it to return from
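The finishing sequence above boils down to building the staging side while readers keep using the active side, then committing with a single store of the new index. The stand-alone C11 sketch below mimics that commit step; it borrows the variable names from the text but is a simplified model, not the emulator's implementation.

```c
/* Minimal sketch of the two-index commit described above: readers use
 * the_active_code_index, loaders build into the_staging_code_index, and the
 * commit is a single atomic store. A simplified stand-alone model only. */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_CODE_IX 2

static _Atomic unsigned the_active_code_index = 0;
static unsigned the_staging_code_index = 1;

static const char *module_table[NUM_CODE_IX] = { "old code", "old code" };

int main(void)
{
    /* 2-5: prepare the staging copy while readers keep using the active one */
    module_table[the_staging_code_index] = "new code";

    /* 6: commit the staging area (after thread progress in the real VM) */
    unsigned committed = the_staging_code_index;
    the_staging_code_index = atomic_load(&the_active_code_index);
    atomic_store(&the_active_code_index, committed);

    printf("readers now see: %s\n",
           module_table[atomic_load(&the_active_code_index)]);
    return 0;
}
```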
diff --git a/erts/emulator/internal_doc/Tracing.md b/erts/emulator/internal_doc/Tracing.md
index f0182daad8..dbd924bb4a 100644
--- a/erts/emulator/internal_doc/Tracing.md
+++ b/erts/emulator/internal_doc/Tracing.md
@@ -127,7 +127,8 @@ aligned write operation on all hardware architectures we use.
This is a simplified sequence describing what `trace_pattern` goes
through when adding a new breakpoint.
-1. Seize exclusive code write permission (suspend process until we get it).
+1. Seize exclusive code modification permission (suspend process until we get
+ it).
2. Allocate breakpoint structure `GenericBp` including both generations.
Set the active part as disabled with a zeroed flagfield. Save the original
@@ -154,14 +155,13 @@ through when adding a new breakpoint.
10. Prepare for next call to `trace_pattern` by updating the new staging part
    (the old active) of the breakpoint to be identical to the new active part.
-11. Release code write permission and return from `trace_pattern`.
+11. Release code modification permission and return from `trace_pattern`.
-The code write permission "lock" seized in step 1 is the same as used
-by code loading. This will ensure that only one process at a time can
-stage new trace settings but it will also prevent concurrent code
-loading and make sure we see a consistent view of the beam code during
-the entire sequence.
+The code modification permission "lock" seized in step 1 is also taken by code
+loading. This ensures that only one process at a time can stage new trace
+settings, and also prevents concurrent code loading and makes sure we see a
+consistent view of the beam code during the entire sequence.
Between steps 6 and 8, running processes might execute the written
`op_i_generic_breakpoint` instruction. They will get the breakpoint
@@ -186,7 +186,8 @@ original beam instruction.
Here is a more complete sequence that contains both adding, updating
and removing breakpoints.
-1. Seize exclusive code write permission (suspend process until we get it).
+1. Seize exclusive code modification permission (suspend process until we get
+ it).
2. Allocate new breakpoint structures with a disabled active part and
the original beam instruction. Write a pointer to the breakpoint in
@@ -216,7 +217,7 @@ and removing breakpoints.
12. Deallocate disabled breakpoint structures.
-13. Release code write permission and return from `trace_pattern`.
+13. Release code modification permission and return from `trace_pattern`.
### All that Waiting for Thread Progress
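Step 1 in both sequences describes the "seize or suspend" behaviour of the code modification permission: a process that cannot take it is suspended and resumed once the holder releases it. The toy C sketch below illustrates that queueing behaviour under simplified assumptions; there is no real process suspension here and none of the names are ERTS APIs.

```c
/* Sketch of the "seize or suspend" behaviour described in step 1: a process
 * that cannot get the permission is parked on a wait queue and resumed when
 * the holder releases it. Stand-alone toy, not ERTS code. */
#include <stdio.h>

#define MAX_WAITERS 8

typedef struct {
    int taken;
    const char *waiters[MAX_WAITERS];
    int nwaiters;
} permission_t;

static permission_t code_mod_perm;

/* Returns 1 if seized; otherwise "suspends" the caller by queueing it. */
static int try_seize(permission_t *p, const char *proc)
{
    if (p->taken) {
        p->waiters[p->nwaiters++] = proc;
        printf("%s suspended\n", proc);
        return 0;
    }
    p->taken = 1;
    return 1;
}

static void release(permission_t *p)
{
    p->taken = 0;
    if (p->nwaiters > 0) {
        /* resume one waiter, which now seizes the permission */
        const char *proc = p->waiters[--p->nwaiters];
        printf("%s resumed\n", proc);
        try_seize(p, proc);
    }
}

int main(void)
{
    try_seize(&code_mod_perm, "tracer");   /* trace_pattern, step 1 */
    try_seize(&code_mod_perm, "loader");   /* must wait */
    release(&code_mod_perm);               /* step 13: tracer done */
    release(&code_mod_perm);               /* loader done */
    return 0;
}
```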
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index 020065f6d8..5c06380391 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -33,8 +33,9 @@
false_dependency/1,coverage/1,fun_confusion/1,
t_copy_literals/1, t_copy_literals_frags/1,
erl_544/1, max_heap_size/1,
- check_process_code_signal_order/1,
- check_process_code_dirty_exec_proc/1]).
+ check_process_code_signal_order/1,
+ check_process_code_dirty_exec_proc/1,
+ call_fun_before_load/1]).
-define(line_trace, 1).
-include_lib("common_test/include/ct.hrl").
@@ -52,7 +53,7 @@ all() ->
false_dependency,
coverage, fun_confusion, t_copy_literals, t_copy_literals_frags,
erl_544, max_heap_size, check_process_code_signal_order,
- check_process_code_dirty_exec_proc].
+ check_process_code_dirty_exec_proc, call_fun_before_load].
init_per_suite(Config) ->
erts_debug:set_internal_state(available_internal_state, true),
@@ -1184,6 +1185,29 @@ check_process_code_dirty_exec_proc(Config) when is_list(Config) ->
false = is_process_alive(Pid),
ok.
+%% OTP-18016: When loading a module over itself, a race in fun loading made it
+%% possible to call a local fun before the module was fully reloaded.
+call_fun_before_load(Config) ->
+ Priv = proplists:get_value(priv_dir, Config),
+ Data = proplists:get_value(data_dir, Config),
+ Path = code:get_path(),
+ true = code:add_path(Priv),
+ try
+ SrcFile = filename:join(Data, "call_fun_before_load.erl"),
+ ObjFile = filename:join(Priv, "call_fun_before_load.beam"),
+ {ok,Mod,Code} = compile:file(SrcFile, [binary, report]),
+ {module,Mod} = code:load_binary(Mod, ObjFile, Code),
+
+ ok = call_fun_before_load:run(ObjFile, Code),
+
+ code:purge(call_fun_before_load)
+ after
+ code:set_path(Path),
+ code:delete(call_fun_before_load),
+ code:purge(call_fun_before_load)
+ end,
+ ok.
+
%% Utilities.
make_sub_binary(Bin) when is_binary(Bin) ->
diff --git a/erts/emulator/test/code_SUITE_data/call_fun_before_load.erl b/erts/emulator/test/code_SUITE_data/call_fun_before_load.erl
new file mode 100644
index 0000000000..9ede44ebd2
--- /dev/null
+++ b/erts/emulator/test/code_SUITE_data/call_fun_before_load.erl
@@ -0,0 +1,45 @@
+-module(call_fun_before_load).
+-export([run/2]).
+
+run(ObjFile, Code) ->
+ Ref = make_ref(),
+ Self = self(),
+ {Pid, MRef} =
+ spawn_monitor(
+ fun() ->
+ %% If we're called before being loaded, process_info/2 can't
+ %% figure out where we are, reporting that we're in an
+ %% unknown module rather than the current one.
+ Fun0 = fun(Fun) ->
+ {current_function,
+ {?MODULE, _, _}} =
+ process_info(self(), current_function),
+ Fun(Fun)
+ end,
+
+ NumScheds = erlang:system_info(schedulers_online),
+ Workers = [spawn_link(fun() -> Fun0(Fun0) end)
+ || _ <- lists:seq(1, NumScheds)],
+
+ %% Give the workers time to start before reloading ourselves.
+ timer:sleep(100),
+
+ code:load_binary(?MODULE, ObjFile, Code),
+
+ %% Give the workers time to crash before exiting.
+ timer:sleep(100),
+
+ Self ! {Ref, Workers},
+
+ ok
+ end),
+ receive
+ {Ref, Workers} ->
+ erlang:demonitor(MRef, [flush]),
+ [exit(Worker, kill) || Worker <- Workers],
+ ok;
+ {'DOWN', MRef, process, Pid, {{badmatch,_}, _}} ->
+ ct:fail("Ran newly staged but not yet loaded code.", []);
+ {'DOWN', MRef, process, Pid, Reason} ->
+ ct:fail(Reason)
+ end.
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 26c897c406..fda41fbd67 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -658,14 +658,14 @@ t_nifs_attrib(Config) when is_list(Config) ->
ok.
-%% Test erlang:load_nif/2 waiting for code_write_permission.
+%% Test erlang:load_nif/2 waiting for code_mod_permission.
t_load_race(Config) ->
Data = proplists:get_value(data_dir, Config),
File = filename:join(Data, "nif_mod"),
{ok,nif_mod,Bin} = compile:file(File, [binary,return_errors]),
{module,nif_mod} = erlang:load_module(nif_mod,Bin),
try
- erts_debug:set_internal_state(code_write_permission, true),
+ erts_debug:set_internal_state(code_mod_permission, true),
Papa = self(),
spawn_link(fun() ->
ok = nif_mod:load_nif_lib(Config, 1),
@@ -674,7 +674,7 @@ t_load_race(Config) ->
timer:sleep(100),
timeout = receive_any(0)
after
- true = erts_debug:set_internal_state(code_write_permission, false)
+ true = erts_debug:set_internal_state(code_mod_permission, false)
end,
"NIF loaded" = receive_any(),
diff --git a/erts/emulator/test/sensitive_SUITE.erl b/erts/emulator/test/sensitive_SUITE.erl
index e3e753c6a5..4996051a90 100644
--- a/erts/emulator/test/sensitive_SUITE.erl
+++ b/erts/emulator/test/sensitive_SUITE.erl
@@ -293,7 +293,7 @@ running_trace(Config) when is_list(Config) ->
{trace,Self,in,{sensitive_SUITE,running_trace,1}} | Extra] = Messages,
case erlang:system_info(emu_type) of
ET when ET =:= debug; ET =:= asan ->
- %% F_DBG_FORCED_TRAP in erts_try_size_code_write_permission
+ %% F_DBG_FORCED_TRAP in erts_try_seize_code_mod_permission
[{trace,Self,out,{erts_internal,trace,3}},
{trace,Self,in,{erts_internal,trace,3}}] = Extra;
_ ->
diff --git a/erts/etc/unix/etp.py b/erts/etc/unix/etp.py
index 53b8bd173f..9c1dcc055a 100644
--- a/erts/etc/unix/etp.py
+++ b/erts/etc/unix/etp.py
@@ -686,10 +686,10 @@ def strip_literal_tag(valobj):
return valobj
def init(target):
- names = ['beam_run_process', 'beam_normal_exit', 'beam_exit', 'beam_save_calls',
- 'beam_bif_export_trap', 'beam_export_trampoline', 'beam_continue_exit',
- 'beam_return_to_trace', 'beam_return_trace', 'beam_exception_trace',
- 'beam_return_time_trace']
+ names = ['beam_run_process', 'beam_normal_exit', 'beam_exit', 'beam_save_calls_export',
+ 'beam_save_calls_fun', 'beam_bif_export_trap', 'beam_export_trampoline',
+ 'beam_continue_exit', 'beam_return_to_trace', 'beam_return_trace',
+ 'beam_exception_trace', 'beam_return_time_trace']
for name in names:
code_pointers[global_var(name, target).unsigned] = name