Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--  erts/emulator/beam/erl_process.c  93
1 file changed, 66 insertions(+), 27 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index ac0a4f8902..479a4c6f9a 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -708,10 +708,10 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
= ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
- = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
- = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NFUNC_TRAP_WRAPPER].get_locks
+ = ERTS_PSD_NFUNC_TRAP_WRAPPER_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NFUNC_TRAP_WRAPPER].set_locks
+ = ERTS_PSD_NFUNC_TRAP_WRAPPER_SET_LOCKS;
erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].get_locks
= ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS;
@@ -6478,8 +6478,8 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
ASSERT(!(state & (ERTS_PSFLG_DIRTY_IO_PROC
|ERTS_PSFLG_DIRTY_CPU_PROC))
- || (BeamIsOpCode(*p->i, op_call_nif)
- || BeamIsOpCode(*p->i, op_apply_bif)));
+ || (BeamIsOpCode(*p->i, op_call_nif_WWW)
+ || BeamIsOpCode(*p->i, op_call_bif_W)));
a = state;
@@ -10992,8 +10992,13 @@ erts_set_gc_state(Process *c_p, int enable)
ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
if (!enable) {
- c_p->flags |= F_DISABLE_GC;
- return 0;
+ /* Strictly speaking it's not illegal to disable the GC when it's
+ * already disabled, but we risk enabling the GC prematurely if (for
+ * example) a BIF were to blindly disable it when trapping and then
+ * re-enable it before returning its result. */
+ ASSERT(!(c_p->flags & F_DISABLE_GC));
+ c_p->flags |= F_DISABLE_GC;
+ return 0;
}
c_p->flags &= ~F_DISABLE_GC;
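
The assertion added in the hunk above enforces a balanced disable/enable discipline on callers of erts_set_gc_state(). A minimal sketch of the intended pattern, assuming a hypothetical trapping BIF (my_consume_1, my_consume_trap_export and work_remains() are illustrative names, not part of this change; erts_set_gc_state() and the BIF_TRAP1/BIF_RET macros are the real ERTS interfaces):

    /* Sketch: GC is disabled exactly once, on the first pass, and
     * re-enabled exactly once, just before the result is returned.
     * Blindly calling erts_set_gc_state(BIF_P, 0) on every trap
     * iteration would now trip the new ASSERT in a debug build. */
    BIF_RETTYPE my_consume_1(BIF_ALIST_1)
    {
        if (!(BIF_P->flags & F_DISABLE_GC))
            erts_set_gc_state(BIF_P, 0);      /* disable once */
        if (work_remains(BIF_ARG_1))          /* illustrative helper */
            BIF_TRAP1(&my_consume_trap_export, BIF_P, BIF_ARG_1);
        erts_set_gc_state(BIF_P, 1);          /* re-enable once */
        BIF_RET(am_ok);
    }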
@@ -11453,7 +11458,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#else
arg_size = size_object_litopt(args, &litarea);
#endif
- heap_need = arg_size;
+ heap_need = arg_size + 1; /* Reserve place for continuation pointer */
p->flags = flags;
@@ -11502,7 +11507,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->old_hend = p->old_htop = p->old_heap = NULL;
p->high_water = p->heap;
p->gen_gcs = 0;
- p->stop = p->hend = p->heap + sz;
+ p->hend = p->heap + sz;
+ p->stop = p->hend - 1; /* Reserve place for continuation pointer */
p->htop = p->heap;
p->heap_sz = sz;
p->abandoned_heap = NULL;
@@ -11520,7 +11526,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->current = &p->u.initial;
p->i = (BeamInstr *) beam_apply;
- p->cp = (BeamInstr *) beam_apply+1;
+ p->stop[0] = make_cp(beam_apply + 1);
p->arg_reg = p->def_arg_reg;
p->max_arg_reg = sizeof(p->def_arg_reg)/sizeof(p->def_arg_reg[0]);
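
Taken together, the three hunks above replace the dedicated p->cp register with a word reserved at the top of the new process's stack: the extra word in heap_need, the p->stop = p->hend - 1 adjustment, and the make_cp() store all account for that same slot. A comment-only illustration of the resulting layout (an illustration, not a code excerpt):

    /* Combined heap + stack of a freshly spawned process:
     *
     *   p->heap .. p->htop    heap, grows upward (spawn args live here)
     *   ...                   free space
     *   p->stop == p->hend-1  stack, grows downward
     *   p->stop[0]            holds make_cp(beam_apply + 1)
     *
     * The reserved top-of-stack word now carries the return address
     * that the removed p->cp field used to hold. */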
@@ -11583,9 +11589,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->mbuf_sz = 0;
erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
p->dictionary = NULL;
- p->seq_trace_lastcnt = 0;
- p->seq_trace_clock = 0;
- SEQ_TRACE_TOKEN(p) = NIL;
#ifdef USE_VM_PROBES
DT_UTAG(p) = NIL;
DT_UTAG_FLAGS(p) = 0;
@@ -11606,6 +11609,45 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->fp_exception = 0;
#endif
+ /* seq_trace is handled before regular tracing as the latter may touch the
+ * trace token. */
+ if (have_seqtrace(SEQ_TRACE_TOKEN(parent))) {
+ Eterm token;
+ Uint token_sz;
+ Eterm *hp;
+
+ ASSERT(SEQ_TRACE_TOKEN_ARITY(parent) == 5);
+ ASSERT(is_immed(SEQ_TRACE_TOKEN_FLAGS(parent)));
+ ASSERT(is_immed(SEQ_TRACE_TOKEN_SERIAL(parent)));
+ ASSERT(is_immed(SEQ_TRACE_TOKEN_LASTCNT(parent)));
+
+ seq_trace_update_serial(parent);
+
+ token = SEQ_TRACE_TOKEN(parent);
+ token_sz = size_object(token);
+
+ hp = HAlloc(p, token_sz);
+ SEQ_TRACE_TOKEN(p) = copy_struct(token, token_sz, &hp, &MSO(p));
+
+ /* The counters behave the same way on spawning as they do on messages;
+ * we don't inherit our parent's lastcnt. */
+ p->seq_trace_lastcnt = parent->seq_trace_clock;
+ p->seq_trace_clock = parent->seq_trace_clock;
+
+ ASSERT((locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) ==
+ (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE));
+
+ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+
+ seq_trace_output(token, NIL, SEQ_TRACE_SPAWN, p->common.id, parent);
+ } else {
+ SEQ_TRACE_TOKEN(p) = NIL;
+ p->seq_trace_lastcnt = 0;
+ p->seq_trace_clock = 0;
+ }
+
if (IS_TRACED(parent)) {
if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) {
ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
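
As the comment in the hunk notes, the counters follow the same rules on spawn as on message passing: the child inherits the parent's freshly updated clock as both its clock and its lastcnt, rather than inheriting the parent's lastcnt. A worked example with illustrative values, assuming seq_trace_update_serial() advances the parent's clock by one:

    /* Parent before spawn:   seq_trace_clock == 7, seq_trace_lastcnt == 3
     * seq_trace_update_serial(parent):  parent clock becomes 8
     * Child after spawn:     seq_trace_lastcnt == 8, seq_trace_clock == 8
     *
     * These are the counters the child would also have ended up with had
     * the parent sent it a seq_trace'd message instead of spawning it. */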
@@ -11627,9 +11669,14 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
}
}
if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
- locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
- erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ /* The locks may already be released if seq_trace is enabled as
+ * well. */
+ if ((locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE))
+ == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) {
+ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ }
trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args);
if (so->flags & SPO_LINK)
trace_proc(parent, locks, parent, am_link, p->common.id);
@@ -11793,7 +11840,6 @@ void erts_init_empty_process(Process *p)
p->u.initial.function = 0;
p->u.initial.arity = 0;
p->catches = 0;
- p->cp = NULL;
p->i = NULL;
p->current = NULL;
@@ -11871,7 +11917,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->bif_timers == NULL);
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
- ASSERT(p->cp == NULL);
ASSERT(p->i == NULL);
ASSERT(p->current == NULL);
@@ -11931,7 +11976,7 @@ delete_process(Process* p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
- erts_destroy_nif_export(p);
+ erts_destroy_nfunc(p);
/* Cleanup psd */
@@ -13117,9 +13162,6 @@ erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "Program counter: %p (", p->i);
print_function_from_pc(to, to_arg, p->i);
erts_print(to, to_arg, ")\n");
- erts_print(to, to_arg, "CP: %p (", p->cp);
- print_function_from_pc(to, to_arg, p->cp);
- erts_print(to, to_arg, ")\n");
state = erts_atomic32_read_acqb(&p->state);
if (!(state & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
@@ -13396,9 +13438,6 @@ static void print_current_process_info(fmtfn_t to, void *to_arg,
erts_print(to, to_arg, "Current Process Program counter: %p (", p->i);
print_function_from_pc(to, to_arg, p->i);
erts_print(to, to_arg, ")\n");
- erts_print(to, to_arg, "Current Process CP: %p (", p->cp);
- print_function_from_pc(to, to_arg, p->cp);
- erts_print(to, to_arg, ")\n");
/* Getting this stacktrace can segfault if we are very very
unlucky if called while a process is being garbage collected.
@@ -13430,7 +13469,7 @@ static void print_current_process_info(fmtfn_t to, void *to_arg,
*
* A BIF that calls this should make sure to schedule out to never come back:
* erts_halt(code);
- * ERTS_BIF_YIELD1(bif_export[BIF_erlang_halt_1], BIF_P, NIL);
+ * ERTS_BIF_YIELD1(&bif_trap_export[BIF_erlang_halt_1], BIF_P, NIL);
*/
void erts_halt(int code)
{
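The yield shown in the updated comment is the whole trick: erts_halt() only flags the shutdown, and the BIF must then schedule itself out so the caller is never resumed. A minimal sketch of a BIF following that convention (a simplification, not the actual erlang:halt/1 implementation; argument validation is elided and the exit code is illustrative):

    BIF_RETTYPE halt_1(BIF_ALIST_1)
    {
        int code = 0;   /* illustrative; the real BIF derives this from BIF_ARG_1 */

        erts_halt(code);
        /* Yield to ourselves; the runtime is halting, so the scheduler
         * never resumes the process and no result is ever returned. */
        ERTS_BIF_YIELD1(&bif_trap_export[BIF_erlang_halt_1], BIF_P, NIL);
    }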