author      John Högberg <john@erlang.org>    2019-08-27 13:11:25 +0200
committer   John Högberg <john@erlang.org>    2019-09-16 14:21:25 +0200
commit      40356bd2ae0e4f4c31204b3dd13d14541442f1a1 (patch)
tree        c3845959870988c148b2fbade94808bd1ebc0ae8 /erts
parent      20cf78bed119da47c66c3efd77c8199b424582b7 (diff)
download    erlang-40356bd2ae0e4f4c31204b3dd13d14541442f1a1.tar.gz
erts: Refactor BIF tracing
This commit replaces our current BIF-specific tracing functionality with the
general function/export tracing used for everything else, fixing a few
longstanding issues:

* BIFs that trapped to themselves, for example lists:reverse/2, would
  generate a call trace message for each trap but only a single return
  trace message.

* BIFs that trapped elsewhere, like erlang:delete_module/1, would lose
  their return trace messages altogether.

* Return/exception trace messages on tail calls would point at the
  function "above" the caller.

* Call count tracing simply didn't work.
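[Editor's note, not part of the commit: the first issue above can be reproduced
with a small trace session. A minimal sketch follows, meant for an Erlang shell;
the list length is illustrative and only needs to be large enough that the BIF
traps (yields) at least once. The expected message counts are those stated in
the commit message.]

    %% Trace a process that calls lists:reverse/2, a BIF that traps to itself.
    Pid = spawn(fun() ->
                    receive go -> lists:reverse(lists:seq(1, 1000000), []) end
                end),
    erlang:trace(Pid, true, [call, {tracer, self()}]),
    erlang:trace_pattern({lists, reverse, 2},
                         [{'_', [], [{return_trace}]}], [global]),
    Pid ! go.
    %% Before this commit the tracer could receive several
    %% {trace, Pid, call, {lists, reverse, [...]}} messages (one per trap) but
    %% only a single {trace, Pid, return_from, ...}; after it, the number of
    %% call and return_from messages match.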
Diffstat (limited to 'erts')
-rw-r--r--  erts/emulator/Makefile.in                        5
-rw-r--r--  erts/emulator/beam/beam_bif_load.c              16
-rw-r--r--  erts/emulator/beam/beam_bp.c                   294
-rw-r--r--  erts/emulator/beam/beam_bp.h                    14
-rw-r--r--  erts/emulator/beam/beam_emu.c                  275
-rw-r--r--  erts/emulator/beam/beam_load.c                 181
-rw-r--r--  erts/emulator/beam/beam_load.h                   2
-rw-r--r--  erts/emulator/beam/bif.c                        46
-rw-r--r--  erts/emulator/beam/bif.tab                      23
-rw-r--r--  erts/emulator/beam/bif_instrs.tab              109
-rw-r--r--  erts/emulator/beam/erl_alloc.c                   6
-rw-r--r--  erts/emulator/beam/erl_alloc.types               1
-rw-r--r--  erts/emulator/beam/erl_bif_info.c               73
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c             140
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.c            53
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.h            50
-rw-r--r--  erts/emulator/beam/erl_nif.c                   100
-rw-r--r--  erts/emulator/beam/erl_trace.h                   6
-rw-r--r--  erts/emulator/beam/export.c                      4
-rw-r--r--  erts/emulator/beam/export.h                     21
-rw-r--r--  erts/emulator/beam/global.h                      6
-rw-r--r--  erts/emulator/beam/ops.tab                     161
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl         72
-rw-r--r--  erts/emulator/test/dirty_bif_SUITE.erl           4
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl          3
-rw-r--r--  erts/emulator/test/trace_call_time_SUITE.erl    70
-rwxr-xr-x  erts/emulator/utils/make_tables                 74
27 files changed, 731 insertions, 1078 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index b2b8acd1b0..70718575e9 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -599,7 +599,6 @@ endif
$(TTF_DIR)/erl_bif_table.c \
$(TTF_DIR)/erl_bif_table.h \
-$(TTF_DIR)/erl_bif_wrap.c \
$(TTF_DIR)/erl_bif_list.h \
$(TTF_DIR)/erl_atom_table.c \
$(TTF_DIR)/erl_atom_table.h \
@@ -885,7 +884,6 @@ RUN_OBJS += \
$(OBJDIR)/erl_bif_persistent.o \
$(OBJDIR)/erl_bif_atomics.o $(OBJDIR)/erl_bif_counters.o \
$(OBJDIR)/erl_bif_trace.o $(OBJDIR)/erl_bif_unique.o \
- $(OBJDIR)/erl_bif_wrap.o $(OBJDIR)/erl_nfunc_sched.o \
$(OBJDIR)/erl_guard_bifs.o $(OBJDIR)/erl_dirty_bif_wrap.o \
$(OBJDIR)/erl_trace.o $(OBJDIR)/copy.o \
$(OBJDIR)/utils.o $(OBJDIR)/bif.o \
@@ -920,7 +918,8 @@ RUN_OBJS += \
$(OBJDIR)/erl_ptab.o $(OBJDIR)/erl_map.o \
$(OBJDIR)/erl_msacc.o $(OBJDIR)/erl_lock_flags.o \
$(OBJDIR)/erl_io_queue.o $(OBJDIR)/erl_db_catree.o \
- $(ESOCK_RUN_OBJS) $(OBJDIR)/erl_flxctr.o
+ $(ESOCK_RUN_OBJS) $(OBJDIR)/erl_flxctr.o \
+ $(OBJDIR)/erl_nfunc_sched.o
LTTNG_OBJS = $(OBJDIR)/erlang_lttng.o
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 587a61d814..a406e14741 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -847,8 +847,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
ep->addressv[code_ix] = (void*)ep->trampoline.not_loaded.deferred;
ep->trampoline.not_loaded.deferred = 0;
} else {
- if (ep->addressv[code_ix] == ep->trampoline.raw &&
- BeamIsOpCode(ep->trampoline.op, op_apply_bif)) {
+ if (ep->bif_table_index != -1) {
continue;
}
@@ -877,7 +876,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
continue;
}
- if (BeamIsOpCode(ep->trampoline.op, op_apply_bif)) {
+ if (ep->bif_table_index != -1) {
continue;
}
@@ -1891,10 +1890,7 @@ delete_code(Module* modp)
Export *ep = export_list(i, code_ix);
if (ep != NULL && (ep->info.mfa.module == module)) {
if (ep->addressv[code_ix] == ep->trampoline.raw) {
- if (BeamIsOpCode(ep->trampoline.op, op_apply_bif)) {
- continue;
- }
- else if (BeamIsOpCode(ep->trampoline.op, op_i_generic_breakpoint)) {
+ if (BeamIsOpCode(ep->trampoline.op, op_i_generic_breakpoint)) {
ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
DBG_TRACE_MFA_P(&ep->info.mfa,
@@ -1906,6 +1902,12 @@ delete_code(Module* modp)
!erts_initialized);
}
}
+
+ if (ep->bif_table_index != -1 && ep->is_bif_traced) {
+ /* Code unloading kills both global and local call tracing. */
+ ep->is_bif_traced = 0;
+ }
+
ep->addressv[code_ix] = ep->trampoline.raw;
ep->trampoline.op = BeamOpCodeAddr(op_call_error_handler);
ep->trampoline.not_loaded.deferred = 0;
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 1b92fe0a1f..1bb20f6ae3 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -207,9 +207,6 @@ erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
if (erts_is_function_native(ci)) {
continue;
}
- if (is_nil(ci->mfa.module)) { /* Ignore BIF stub */
- continue;
- }
switch (specified) {
case 3:
if (ci->mfa.arity != mfa->arity)
@@ -244,8 +241,10 @@ erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
ne = 0;
for (i = 0; i < num_exps; i++) {
- Export* ep = export_list(i, code_ix);
- BeamInstr* pc;
+ BeamInstr *func;
+ Export* ep;
+
+ ep = export_list(i, code_ix);
switch (specified) {
case 3:
@@ -263,19 +262,20 @@ erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
ASSERT(0);
}
- pc = ep->trampoline.raw;
- if (ep->addressv[code_ix] == pc) {
- if (BeamIsOpCode(*pc, op_apply_bif) ||
- BeamIsOpCode(*pc, op_call_error_handler)) {
- continue;
- }
- ASSERT(BeamIsOpCode(*pc, op_i_generic_breakpoint));
- } else if (erts_is_function_native(erts_code_to_codeinfo(ep->addressv[code_ix]))) {
- continue;
- }
+ func = ep->addressv[code_ix];
+
+ if (func == ep->trampoline.raw) {
+ if (BeamIsOpCode(*func, op_call_error_handler)) {
+ continue;
+ }
+ ASSERT(BeamIsOpCode(*func, op_i_generic_breakpoint));
+ } else if (erts_is_function_native(erts_code_to_codeinfo(func))) {
+ continue;
+ }
f->matching[ne].ci = &ep->info;
f->matching[ne].mod = erts_get_module(ep->info.mfa.module, code_ix);
+
ne++;
}
@@ -305,18 +305,6 @@ erts_consolidate_bp_data(BpFunctions* f, int local)
}
}
-void
-erts_consolidate_bif_bp_data(void)
-{
- int i;
-
- ERTS_LC_ASSERT(erts_has_code_write_permission());
- for (i = 0; i < BIF_SIZE; i++) {
- Export *ep = bif_export[i];
- consolidate_bp_data(0, &ep->info, 0);
- }
-}
-
static void
consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
@@ -495,7 +483,7 @@ erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
}
void
-erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local)
+erts_set_export_trace(ErtsCodeInfo *ci, Binary *match_spec, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
@@ -503,25 +491,6 @@ erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local)
}
void
-erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec, ErtsTracer tracer)
-{
- set_function_break(ci, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
-}
-
-void
-erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op count_op)
-{
- set_function_break(ci, NULL,
- ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
- count_op, erts_tracer_nil);
-}
-
-void
-erts_clear_time_trace_bif(ErtsCodeInfo *ci) {
- clear_function_break(ci, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
-}
-
-void
erts_set_debug_break(BpFunctions* f) {
set_break(f, NULL, ERTS_BPF_DEBUG, 0, erts_tracer_nil);
}
@@ -547,7 +516,7 @@ erts_clear_trace_break(BpFunctions* f)
}
void
-erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local)
+erts_clear_export_trace(ErtsCodeInfo *ci, int local)
{
GenericBp* g = ci->u.gen_bp;
@@ -566,12 +535,6 @@ erts_clear_mtrace_break(BpFunctions* f)
}
void
-erts_clear_mtrace_bif(ErtsCodeInfo *ci)
-{
- clear_function_break(ci, ERTS_BPF_META_TRACE);
-}
-
-void
erts_clear_debug_break(BpFunctions* f)
{
ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
@@ -771,229 +734,6 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
}
}
-/*
- * Entry point called by the trace wrap functions in erl_bif_wrap.c
- *
- * The trace wrap functions are themselves called through the export
- * entries instead of the original BIF functions.
- */
-Eterm
-erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
-{
- Eterm result;
- Eterm (*func)(Process*, Eterm*, BeamInstr*);
- Export* ep = bif_export[bif_index];
- Uint32 flags = 0, flags_meta = 0;
- ErtsTracer meta_tracer = erts_tracer_nil;
- int applying = (I == ep->trampoline.raw); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
- BeamInstr* cp = (BeamInstr *) p->stop[0];
- GenericBp* g;
- GenericBpData* bp = NULL;
- Uint bp_flags = 0;
- int return_to_trace = 0;
-
- ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-
- g = ep->info.u.gen_bp;
- if (g) {
- bp = &g->data[erts_active_bp_ix()];
- bp_flags = bp->flags;
- }
-
- /*
- * Make continuation pointer OK, it is not during direct BIF calls,
- * but it is correct during apply of bif.
- */
- if (!applying) {
- p->stop[0] = (Eterm) I;
- } else {
- fixup_cp_before_trace(p, &return_to_trace);
- }
- if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
- IS_TRACED_FL(p, F_TRACE_CALLS)) {
- int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
- flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
- local, &ERTS_TRACER(p));
- }
- if (bp_flags & ERTS_BPF_META_TRACE) {
- ErtsTracer old_tracer;
-
- meta_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer);
- old_tracer = meta_tracer;
- flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
- 0, &meta_tracer);
-
- if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
- ErtsTracer new_tracer = erts_tracer_nil;
- erts_tracer_update(&new_tracer, meta_tracer);
- if (old_tracer == erts_atomic_cmpxchg_acqb(
- &bp->meta_tracer->tracer,
- (erts_aint_t)new_tracer,
- (erts_aint_t)old_tracer)) {
- ERTS_TRACER_CLEAR(&old_tracer);
- } else {
- ERTS_TRACER_CLEAR(&new_tracer);
- }
- }
- }
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
- IS_TRACED_FL(p, F_TRACE_CALLS)) {
- erts_trace_time_call(p, &ep->info, bp->time);
- }
-
- /* Restore original continuation pointer (if changed). */
- p->stop[0] = (Eterm) cp;
-
- func = bif_table[bif_index].f;
-
- result = func(p, args, I);
-
- if (erts_nif_export_check_save_trace(p, result,
- applying, ep,
- flags,
- flags_meta, I,
- meta_tracer)) {
- /*
- * erts_bif_trace_epilogue() will be called
- * later when appropriate via the NIF export
- * scheduling functionality...
- */
- return result;
- }
-
- return erts_bif_trace_epilogue(p, result, applying, ep,
- flags, flags_meta, I,
- meta_tracer);
-}
-
-Eterm
-erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
- Export* ep, Uint32 flags,
- Uint32 flags_meta, BeamInstr* I,
- ErtsTracer meta_tracer)
-{
- BeamInstr *cp = NULL;
-
- if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
- BeamInstr i_return_time_trace = beam_return_time_trace[0];
- Eterm *cpp;
-
- /* Maybe advance cp to skip trace stack frames */
- cpp = p->stop;
- while (is_not_CP(*cpp)) {
- cpp++;
- }
- for (cp = cp_val(*cpp++); ;) {
- if (*cp == i_return_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 2; /* Skip return_trace parameters */
- } else if (*cp == i_return_time_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 1; /* Skip return_time_trace parameters */
- } else if (*cp == i_return_to_trace) {
- /* A return_to trace message is going to be generated
- * by normal means, so we do not have to.
- */
- cp = NULL;
- break;
- } else {
- break;
- }
- cp = cp_val(*cpp++);
- }
- }
-
- /* Try to get these in the order
- * they usually appear in normal code... */
- if (is_non_value(result)) {
- Uint reason = p->freason;
- if (reason != TRAP) {
- Eterm class;
- Eterm value = p->fvalue;
- /* Expand error value like in handle_error() */
- if (reason & EXF_ARGLIST) {
- Eterm *tp;
- ASSERT(is_tuple(value));
- tp = tuple_val(value);
- value = tp[1];
- }
- if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- Eterm *hp = HAlloc(p, 3);
- value = TUPLE2(hp, am_nocatch, value);
- reason = EXC_ERROR;
- }
- /* Note: expand_error_value() could theoretically
- * allocate on the heap, but not for any error
- * returned by a BIF, and it would do no harm,
- * just be annoying.
- */
- value = expand_error_value(p, reason, value);
- class = exception_tag[GET_EXC_CLASS(reason)];
-
- if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, &ep->info.mfa, class, value,
- &meta_tracer);
- }
- if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, &ep->info.mfa, class, value,
- &ERTS_TRACER(p));
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
- /* can only happen if(local)*/
- Eterm *ptr = p->stop;
- ASSERT(!applying || is_CP(*ptr));
- ASSERT(ptr <= STACK_START(p));
- /* Search the nearest stack frame for a catch */
- while (++ptr < STACK_START(p)) {
- if (is_CP(*ptr)) break;
- if (is_catch(*ptr)) {
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into
- * calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
- erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- } else {
- if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, &ep->info.mfa, result, &meta_tracer);
- }
- /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
- if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, &ep->info.mfa, result, &ERTS_TRACER(p));
- }
- if (flags & MATCH_SET_RETURN_TO_TRACE &&
- IS_TRACED_FL(p, F_TRACE_RETURN_TO)) {
- /* can only happen if(local)*/
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- return result;
-}
-
static ErtsTracer
do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg,
int local, Binary* ms, ErtsTracer tracer)
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 9f7ec16b71..54e84e7e4f 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -119,20 +119,16 @@ void erts_bp_free_matched_functions(BpFunctions* f);
void erts_install_breakpoints(BpFunctions* f);
void erts_uninstall_breakpoints(BpFunctions* f);
void erts_consolidate_bp_data(BpFunctions* f, int local);
-void erts_consolidate_bif_bp_data(void);
void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
void erts_clear_trace_break(BpFunctions *f);
-void erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local);
-void erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local);
+void erts_set_export_trace(ErtsCodeInfo *ci, Binary *match_spec, int local);
+void erts_clear_export_trace(ErtsCodeInfo *ci, int local);
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
ErtsTracer tracer);
void erts_clear_mtrace_break(BpFunctions *f);
-void erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec,
- ErtsTracer tracer);
-void erts_clear_mtrace_bif(ErtsCodeInfo *ci);
void erts_set_debug_break(BpFunctions *f);
void erts_clear_debug_break(BpFunctions *f);
@@ -151,8 +147,6 @@ BeamInstr erts_trace_break(Process *p, ErtsCodeInfo *ci, Eterm *args,
int erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret);
-int erts_is_mtrace_bif(ErtsCodeInfo *ci, Binary **match_spec_ret,
- ErtsTracer *tracer_ret);
int erts_is_native_break(ErtsCodeInfo *ci);
int erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret);
int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
@@ -163,10 +157,6 @@ void erts_schedule_time_break(Process *p, Uint out);
void erts_set_time_break(BpFunctions *f, enum erts_break_op);
void erts_clear_time_break(BpFunctions *f);
-int erts_is_time_trace_bif(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
-void erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op);
-void erts_clear_time_trace_bif(ErtsCodeInfo *ci);
-
ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 48824ef9da..5ff4549818 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -111,10 +111,9 @@ do { \
#define CHECK_ALIGNED(Dst) ASSERT((((Uint)&Dst) & (sizeof(Uint)-1)) == 0)
-#define GET_BIF_MODULE(p) ((p)->info.mfa.module)
-#define GET_BIF_FUNCTION(p) ((p)->info.mfa.function)
-#define GET_BIF_ARITY(p) ((p)->info.mfa.arity)
-#define GET_BIF_ADDRESS(p) ((BifFunction)((p)->trampoline.bif.func))
+#define GET_EXPORT_MODULE(p) ((p)->info.mfa.module)
+#define GET_EXPORT_FUNCTION(p) ((p)->info.mfa.function)
+#define GET_EXPORT_ARITY(p) ((p)->info.mfa.arity)
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
@@ -296,6 +295,7 @@ do { \
*/
static void init_emulator_finish(void) ERTS_NOINLINE;
static ErtsCodeMFA *ubif2mfa(void* uf) ERTS_NOINLINE;
+static BeamInstr *printable_return_address(Process* p, Eterm *E) ERTS_NOINLINE;
static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
Eterm* reg, ErtsCodeMFA* bif_mfa) ERTS_NOINLINE;
static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
@@ -888,15 +888,52 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
}
/*
+ * Enter all BIFs into the export table.
+ *
+ * Note that they will all call the error_handler until their modules have been
+ * loaded, which may prevent the system from booting if BIFs from non-preloaded
+ * modules are apply/3'd while loading code. Ordinary BIF calls will work fine
+ * however since they won't go through export entries.
+ */
+static void install_bifs(void) {
+ int i;
+
+ for (i = 0; i < BIF_SIZE; i++) {
+ BifEntry *entry;
+ Export *ep;
+ int j;
+
+ entry = &bif_table[i];
+
+ ep = erts_export_put(entry->module, entry->name, entry->arity);
+
+ ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
+ ep->info.mfa.module = entry->module;
+ ep->info.mfa.function = entry->name;
+ ep->info.mfa.arity = entry->arity;
+ ep->bif_table_index = i;
+
+ memset(&ep->trampoline, 0, sizeof(ep->trampoline));
+ ep->trampoline.op = BeamOpCodeAddr(op_call_error_handler);
+
+ for (j = 0; j < ERTS_NUM_CODE_IX; j++) {
+ ep->addressv[j] = ep->trampoline.raw;
+ }
+
+ bif_export[i] = ep;
+ }
+}
+
+/*
* One-time initialization of emulator. Does not need to be
* in process_main().
*/
static void
init_emulator_finish(void)
{
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
int i;
-#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
for (i = 0; i < NUMBER_OF_OPCODES; i++) {
BeamInstr instr = BeamOpCodeAddr(i);
if (instr >= (1ull << 32)) {
@@ -917,24 +954,7 @@ init_emulator_finish(void)
beam_exception_trace[0] = BeamOpCodeAddr(op_return_trace); /* UGLY */
beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace);
- /*
- * Enter all BIFs into the export table.
- */
- for (i = 0; i < BIF_SIZE; i++) {
- Export *ep = erts_export_put(bif_table[i].module,
- bif_table[i].name,
- bif_table[i].arity);
-
- ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
- ep->info.mfa.module = bif_table[i].module;
- ep->info.mfa.function = bif_table[i].name;
- ep->info.mfa.arity = bif_table[i].arity;
-
- ep->trampoline.op = BeamOpCodeAddr(op_apply_bif);
- ep->trampoline.bif.func = (BeamInstr) bif_table[i].f;
-
- bif_export[i] = ep;
- }
+ install_bifs();
}
/*
@@ -1234,6 +1254,33 @@ Eterm error_atom[NUMBER_EXIT_CODES] = {
am_badkey, /* 19 */
};
+/* Returns the return address at E[0] in printable form, skipping tracing in
+ * the same manner as gather_stacktrace.
+ *
+ * This is needed to generate correct stacktraces when throwing errors from
+ * instructions that return like an ordinary function, such as call_nif. */
+static BeamInstr *printable_return_address(Process* p, Eterm *E) {
+ Eterm *ptr = E;
+
+ ASSERT(is_CP(*ptr));
+
+ while (ptr < STACK_START(p)) {
+ BeamInstr *cp = cp_val(*ptr);
+
+ if (cp == beam_exception_trace || cp == beam_return_trace) {
+ ptr += 3;
+ } else if (cp == beam_return_time_trace) {
+ ptr += 2;
+ } else if (cp == beam_return_to_trace) {
+ ptr += 1;
+ } else {
+ return cp;
+ }
+ }
+
+ ERTS_ASSERT(!"No continuation pointer on stack");
+}
+
/*
* To fully understand the error handling, one must keep in mind that
* when an exception is thrown, the search for a handler can jump back
@@ -1527,19 +1574,24 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
static void
-gather_stacktrace(Process* p, Eterm *ptr, struct StackTrace* s, int depth)
+gather_stacktrace(Process* p, struct StackTrace* s, int depth)
{
- BeamInstr *prev;
- BeamInstr i_return_trace;
+ BeamInstr i_return_time_trace;
BeamInstr i_return_to_trace;
+ BeamInstr i_return_trace;
+ BeamInstr *prev;
+ Eterm *ptr;
if (depth == 0) {
return;
}
- prev = s->depth ? s->trace[s->depth-1] : s->pc;
- i_return_trace = beam_return_trace[0];
+ i_return_time_trace = beam_return_time_trace[0];
i_return_to_trace = beam_return_to_trace[0];
+ i_return_trace = beam_return_trace[0];
+
+ prev = s->depth ? s->trace[s->depth-1] : s->pc;
+ ptr = p->stop;
/*
* Traverse the stack backwards and add all unique continuation
@@ -1552,16 +1604,15 @@ gather_stacktrace(Process* p, Eterm *ptr, struct StackTrace* s, int depth)
while (ptr < STACK_START(p) && depth > 0) {
if (is_CP(*ptr)) {
- if (*cp_val(*ptr) == i_return_trace) {
- /* Skip stack frame variables */
- do ++ptr; while (is_not_CP(*ptr));
- /* Skip return_trace parameters */
+ BeamInstr *cp = cp_val(*ptr);
+
+ if (*cp == i_return_time_trace) {
ptr += 2;
- } else if (*cp_val(*ptr) == i_return_to_trace) {
- /* Skip stack frame variables */
- do ++ptr; while (is_not_CP(*ptr));
+ } else if (*cp == i_return_to_trace) {
+ ptr += 1;
+ } else if (*cp == i_return_trace) {
+ ptr += 3;
} else {
- BeamInstr *cp = cp_val(*ptr);
if (cp != prev) {
/* Record non-duplicates only */
prev = cp;
@@ -1614,7 +1665,6 @@ static void
save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
ErtsCodeMFA *bif_mfa, Eterm args) {
struct StackTrace* s;
- Eterm *stack_start;
int sz;
int depth = erts_backtrace_depth; /* max depth (never negative) */
@@ -1633,33 +1683,6 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
s->depth = 0;
/*
- * If we crash on an instruction that returns to a return/exception trace
- * instruction, we must set the stacktrace 'pc' to the actual return
- * address or we'll lose the top stackframe when gathering the stack
- * trace.
- */
- stack_start = STACK_TOP(c_p);
- if (stack_start < STACK_START(c_p) && is_CP(*stack_start)) {
- BeamInstr *cp = cp_val(*stack_start);
-
- if (cp == pc) {
- if (pc == beam_exception_trace || pc == beam_return_trace) {
- ASSERT(&stack_start[3] <= STACK_START(c_p));
- /* Fake having failed on the first instruction in the function
- * pointed to by the tag. */
- pc = cp_val(stack_start[1]);
- stack_start += 3;
- } else if (pc == beam_return_to_trace) {
- ASSERT(&stack_start[2] <= STACK_START(c_p));
- pc = cp_val(stack_start[1]);
- /* Skip both the trace tag and the new 'pc' to avoid
- * duplicated entries. */
- stack_start += 2;
- }
- }
- }
-
- /*
* If the failure was in a BIF other than 'error/1', 'error/2',
* 'exit/1' or 'throw/1', save BIF-MFA and save the argument
* registers by consing up an arglist.
@@ -1721,13 +1744,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
}
/* Save the actual stack trace */
- gather_stacktrace(c_p, stack_start, s, depth);
+ gather_stacktrace(c_p, s, depth);
}
void
erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
{
- gather_stacktrace(p, STACK_TOP(p), s, depth);
+ gather_stacktrace(p, s, depth);
}
/*
@@ -1986,83 +2009,66 @@ apply_bif_error_adjustment(Process *p, Export *ep,
Eterm *reg, Uint arity,
BeamInstr *I, Uint stack_offset)
{
+ int apply_only;
+ Uint need;
+
+ need = stack_offset /* bytes */ / sizeof(Eterm);
+ apply_only = stack_offset == 0;
+
/*
* I is only set when the apply is a tail call, i.e.,
* from the instructions i_apply_only, i_apply_last_P,
* and apply_last_IP.
*/
- if (I
- && BeamIsOpCode(ep->trampoline.op, op_apply_bif)
- && (ep == bif_export[BIF_error_1]
- || ep == bif_export[BIF_error_2]
- || ep == bif_export[BIF_exit_1]
- || ep == bif_export[BIF_throw_1])) {
- /*
- * We are about to tail apply one of the BIFs
- * erlang:error/1, erlang:error/2, erlang:exit/1,
- * or erlang:throw/1. Error handling of these BIFs is
- * special!
- *
- * We need the topmost continuation pointer to point into the
- * calling function when handling the error after the BIF has
- * been applied. This in order to get the topmost stackframe
- * correct.
- *
- * Note that these BIFs will unconditionally cause an
- * exception to be raised. That is, our modifications of the
- * stack will be corrected by the error handling code.
- */
- int apply_only = stack_offset == 0;
- BeamInstr *cpp;
- Eterm *E;
+ if (!(I && (ep == bif_export[BIF_error_1] ||
+ ep == bif_export[BIF_error_2] ||
+ ep == bif_export[BIF_exit_1] ||
+ ep == bif_export[BIF_throw_1]))) {
+ return;
+ }
- E = p->stop;
+ /*
+ * We are about to tail apply one of the BIFs erlang:error/1,
+ * erlang:error/2, erlang:exit/1, or erlang:throw/1. Error handling of
+ * these BIFs is special!
+ *
+ * We need the topmost continuation pointer to point into the calling
+ * function when handling the error after the BIF has been applied. This in
+ * order to get the topmost stackframe correct.
+ *
+ * Note that these BIFs will unconditionally cause an exception to be
+ * raised. That is, our modifications of the stack will be corrected by the
+ * error handling code.
+ */
+ if (need == 0) {
+ need = 1; /* i_apply_only */
+ }
- while (is_not_CP(*E)) {
- E++;
- }
- cpp = cp_val(E[0]);
+ if (p->stop - p->htop < need) {
+ erts_garbage_collect(p, (int) need, reg, arity+1);
+ }
+ if (apply_only) {
/*
- * If we find an exception/return-to trace continuation
- * pointer as the topmost continuation pointer, we do not
- * need to do anything since the information will already
- * be available for generation of the stacktrace.
+ * Called from the i_apply_only instruction.
+ *
+ * Push the continuation pointer for the current function to the stack.
*/
-
- if (cpp != beam_exception_trace
- && cpp != beam_return_trace
- && cpp != beam_return_to_trace) {
- Uint need = stack_offset /* bytes */ / sizeof(Eterm);
- if (need == 0)
- need = 1; /* i_apply_only */
- if (p->stop - p->htop < need)
- erts_garbage_collect(p, (int) need, reg, arity+1);
- if (apply_only) {
- /*
- * Called from the i_apply_only instruction.
- *
- * Push the continuation pointer for the current
- * function to the stack.
- */
- p->stop -= need;
- p->stop[0] = make_cp(I);
- } else {
- /*
- * Called from an i_apply_last_* instruction.
- *
- * The calling instruction will deallocate a stack
- * frame of size 'stack_offset'.
- *
- * Push the continuation pointer for the current
- * function to the stack, and then add a dummy
- * stackframe for the i_apply_last* instruction
- * to discard.
- */
- p->stop[0] = make_cp(I);
- p->stop -= need;
- }
- }
+ p->stop -= need;
+ p->stop[0] = make_cp(I);
+ } else {
+ /*
+ * Called from an i_apply_last_* instruction.
+ *
+ * The calling instruction will deallocate a stack frame of size
+ * 'stack_offset'.
+ *
+ * Push the continuation pointer for the current function to the stack,
+ * and then add a dummy stackframe for the i_apply_last* instruction
+ * to discard.
+ */
+ p->stop[0] = make_cp(I);
+ p->stop -= need;
}
}
@@ -3117,8 +3123,7 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
return 0;
}
- return ep->addressv[erts_active_code_ix()] == ep->trampoline.raw &&
- BeamIsOpCode(ep->trampoline.op, op_apply_bif);
+ return ep->bif_table_index != -1;
}
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 2b4b16983c..45122fe933 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -141,7 +141,7 @@ typedef struct {
* eventually patch with a pointer into
* the export entry.
*/
- BifFunction bf; /* Pointer to BIF function if BIF;
+ Export *bif; /* Pointer to export entry if BIF;
* NULL otherwise.
*/
} ImportEntry;
@@ -849,9 +849,7 @@ erts_finish_loading(Binary* magic, Process* c_p,
DBG_CHECK_EXPORT(ep, code_ix);
if (ep->addressv[code_ix] == ep->trampoline.raw) {
- if (BeamIsOpCode(ep->trampoline.op, op_apply_bif)) {
- continue;
- } else if (BeamIsOpCode(ep->trampoline.op, op_i_generic_breakpoint)) {
+ if (BeamIsOpCode(ep->trampoline.op, op_i_generic_breakpoint)) {
ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(mod_tab_p->curr.num_traced_exports > 0);
@@ -1479,15 +1477,14 @@ load_import_table(LoaderState* stp)
}
stp->import[i].arity = arity;
stp->import[i].patches = 0;
- stp->import[i].bf = NULL;
+ stp->import[i].bif = NULL;
/*
- * If the export entry refers to a BIF, get the pointer to
- * the BIF function.
+ * If the export entry refers to a BIF, save a pointer to the BIF entry.
*/
if ((e = erts_active_export_entry(mod, func, arity)) != NULL) {
- if (BeamIsOpCode(e->trampoline.op, op_apply_bif)) {
- stp->import[i].bf = (BifFunction) e->trampoline.bif.func;
+ if (e->bif_table_index != -1) {
+ stp->import[i].bif = e;
if (func == am_load_nif && mod == am_erlang && arity == 2) {
stp->may_load_nif = 1;
}
@@ -1538,33 +1535,6 @@ read_export_table(LoaderState* stp)
LoadError2(stp, "export table entry %u: label %u not resolved", i, n);
}
stp->export[i].address = address = stp->codev + value;
-
- /*
- * Find out if there is a BIF with the same name.
- */
-
- if (!is_bif(stp->module, func, arity)) {
- continue;
- }
-
- /*
- * This is a stub for a BIF.
- *
- * It should not be exported, and the information in its
- * func_info instruction should be invalidated so that it
- * can be filtered out by module_info(functions) and by
- * any other functions that walk through all local functions.
- */
-
- if (stp->labels[n].num_patches > 0) {
- LoadError3(stp, "there are local calls to the stub for "
- "the BIF %T:%T/%d",
- stp->module, func, arity);
- }
- stp->export[i].address = NULL;
- address[-1] = 0;
- address[-2] = NIL;
- address[-3] = NIL;
}
return 1;
@@ -1572,25 +1542,16 @@ read_export_table(LoaderState* stp)
return 0;
}
-
static int
is_bif(Eterm mod, Eterm func, unsigned arity)
{
- Export* e = erts_active_export_entry(mod, func, arity);
- if (e == NULL) {
- return 0;
- }
- if (! BeamIsOpCode(e->trampoline.op, op_apply_bif)) {
- return 0;
- }
- if (mod == am_erlang && func == am_apply && arity == 3) {
- /*
- * erlang:apply/3 is a special case -- it is implemented
- * as an instruction and it is OK to redefine it.
- */
- return 0;
+ Export *e = erts_active_export_entry(mod, func, arity);
+
+ if (e != NULL) {
+ return e->bif_table_index != -1;
}
- return 1;
+
+ return 0;
}
static int
@@ -2542,10 +2503,14 @@ load_code(LoaderState* stp)
if (i >= stp->num_imports) {
LoadError1(stp, "invalid import table index %d", i);
}
- if (stp->import[i].bf == NULL) {
+ if (stp->import[i].bif == NULL) {
LoadError1(stp, "not a BIF: import table index %d", i);
}
- code[ci++] = (BeamInstr) stp->import[i].bf;
+ {
+ int bif_index = stp->import[i].bif->bif_table_index;
+ BifEntry *bif_entry = &bif_table[bif_index];
+ code[ci++] = (BeamInstr) bif_entry->f;
+ }
break;
case 'P': /* Byte offset into tuple or stack */
case 'Q': /* Like 'P', but packable */
@@ -2853,18 +2818,43 @@ load_code(LoaderState* stp)
switch (stp->specific_op) {
case op_i_func_info_IaaI:
{
+ int padding_required;
Sint offset;
+
if (function_number >= stp->num_functions) {
LoadError1(stp, "too many functions in module (header said %u)",
stp->num_functions);
}
- if (stp->may_load_nif) {
+ /* Native function calls may be larger than their stubs, so
+ * we'll need to make sure any potentially-native function stub
+ * is padded with enough room.
+ *
+ * Note that the padding is applied for the previous function,
+ * not the current one, so we check whether the old F/A is
+ * a BIF. */
+ padding_required = last_func_start && (stp->may_load_nif ||
+ is_bif(stp->module, stp->function, stp->arity));
+
+ /*
+ * Save context for error messages.
+ */
+ stp->function = code[ci-2];
+ stp->arity = code[ci-1];
+
+ /*
+ * Save current offset of into the line instruction array.
+ */
+ if (stp->func_line) {
+ stp->func_line[function_number] = stp->current_li;
+ }
+
+ if (padding_required) {
const int finfo_ix = ci - FUNC_INFO_SZ;
- if (finfo_ix - last_func_start < BEAM_NIF_MIN_FUNC_SZ && last_func_start) {
+ if (finfo_ix - last_func_start < BEAM_NATIVE_MIN_FUNC_SZ) {
/* Must make room for call_nif op */
- int pad = BEAM_NIF_MIN_FUNC_SZ - (finfo_ix - last_func_start);
- ASSERT(pad > 0 && pad < BEAM_NIF_MIN_FUNC_SZ);
+ int pad = BEAM_NATIVE_MIN_FUNC_SZ - (finfo_ix - last_func_start);
+ ASSERT(pad > 0 && pad < BEAM_NATIVE_MIN_FUNC_SZ);
CodeNeed(pad);
sys_memmove(&code[finfo_ix+pad], &code[finfo_ix],
FUNC_INFO_SZ*sizeof(BeamInstr));
@@ -2875,20 +2865,6 @@ load_code(LoaderState* stp)
}
last_func_start = ci;
- /*
- * Save current offset of into the line instruction array.
- */
-
- if (stp->func_line) {
- stp->func_line[function_number] = stp->current_li;
- }
-
- /*
- * Save context for error messages.
- */
- stp->function = code[ci-2];
- stp->arity = code[ci-1];
-
/* When this assert is triggered, it is normally a sign that
the size of the ops.tab i_func_info instruction is not
the same as FUNC_INFO_SZ */
@@ -2918,7 +2894,6 @@ load_code(LoaderState* stp)
case op_i_bs_match_string_yfWW:
new_string_patch(stp, ci-1);
break;
-
case op_catch_yf:
/* code[ci-3] &&lb_catch_yf
* code[ci-2] y-register offset in E
@@ -3192,6 +3167,26 @@ is_killed_by_make_fun(LoaderState* stp, GenOpArg Reg, GenOpArg idx)
}
}
+/* Test whether Bif is "heavy" and should always go through its export entry */
+static int
+is_heavy_bif(LoaderState* stp, GenOpArg Bif)
+{
+ Export *bif_export;
+
+ if (Bif.type != TAG_u || Bif.val >= stp->num_imports) {
+ return 0;
+ }
+
+ bif_export = stp->import[Bif.val].bif;
+
+ if (bif_export) {
+ int bif_index = bif_export->bif_table_index;
+ return bif_table[bif_index].kind == BIF_KIND_HEAVY;
+ }
+
+ return 0;
+}
+
/*
* Generate an instruction for element/2.
*/
@@ -5219,25 +5214,28 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
*/
for (i = 0; i < stp->num_exps; i++) {
- Export* ep;
- BeamInstr* address = stp->export[i].address;
+ Export* ep;
+ BeamInstr* address = stp->export[i].address;
- if (address == NULL) {
- /* Skip stub for a BIF */
- continue;
- }
- ep = erts_export_put(stp->module, stp->export[i].function,
- stp->export[i].arity);
- if (on_load) {
- /*
- * on_load: Don't make any of the exported functions
- * callable yet. Keep any function in the current
- * code callable.
- */
+ ep = erts_export_put(stp->module,
+ stp->export[i].function,
+ stp->export[i].arity);
+
+ /* Fill in BIF stubs with a proper call to said BIF. */
+ if (ep->bif_table_index != -1) {
+ erts_write_bif_wrapper(ep, address);
+ }
+
+ if (on_load) {
+ /*
+ * on_load: Don't make any of the exported functions
+ * callable yet. Keep any function in the current
+ * code callable.
+ */
ep->trampoline.not_loaded.deferred = (BeamInstr) address;
- }
- else
+ } else {
ep->addressv[erts_staging_code_ix()] = address;
+ }
}
/*
@@ -5411,15 +5409,14 @@ transform_engine(LoaderState* st)
i = instr->a[ap].val;
ASSERT(i < st->num_imports);
- if (i >= st->num_imports || st->import[i].bf == NULL)
+ if (i >= st->num_imports || st->import[i].bif == NULL)
goto restart;
if (bif_number != -1 &&
- bif_export[bif_number]->trampoline.bif.func != (BeamInstr) st->import[i].bf) {
+ bif_export[bif_number] != st->import[i].bif) {
goto restart;
}
}
break;
-
#endif
#if defined(TOP_is_not_bif)
case TOP_is_not_bif:
@@ -5449,7 +5446,7 @@ transform_engine(LoaderState* st)
* they are special.
*/
if (i < st->num_imports) {
- if (st->import[i].bf != NULL ||
+ if (st->import[i].bif != NULL ||
(st->import[i].module == am_erlang &&
st->import[i].function == am_apply &&
(st->import[i].arity == 2 || st->import[i].arity == 3))) {
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 156c3c45e2..e7127c5b08 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -106,7 +106,7 @@ typedef struct beam_code_header {
}BeamCodeHeader;
-# define BEAM_NIF_MIN_FUNC_SZ 4
+# define BEAM_NATIVE_MIN_FUNC_SZ 4
void erts_release_literal_area(struct ErtsLiteralArea_* literal_area);
int erts_is_module_native(BeamCodeHeader* code);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index aba3af3424..593ddf9a29 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -4982,7 +4982,17 @@ void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
ep->info.mfa.function = f;
ep->info.mfa.arity = a;
ep->trampoline.op = BeamOpCodeAddr(op_apply_bif);
- ep->trampoline.bif.func = (BeamInstr) bif;
+ ep->trampoline.raw[1] = (BeamInstr)bif;
+}
+
+/*
+ * Writes a BIF call wrapper to the given address.
+ */
+void erts_write_bif_wrapper(Export *export, BeamInstr *address) {
+ BifEntry *entry = &bif_table[export->bif_table_index];
+
+ address[0] = BeamOpCodeAddr(op_apply_bif);
+ address[1] = (BeamInstr)entry->f;
}
void erts_init_bif(void)
@@ -5073,7 +5083,7 @@ static BIF_RETTYPE dirty_bif_trap(BIF_ALIST)
* correct by call to dirty_bif_trap()...
*/
- ASSERT(BIF_P->arity == nep->exp.info.mfa.arity);
+ ASSERT(BIF_P->arity == nep->trampoline.info.mfa.arity);
erts_nif_export_restore(BIF_P, nep, THE_NON_VALUE);
@@ -5129,6 +5139,7 @@ erts_schedule_bif(Process *proc,
if (!ERTS_PROC_IS_EXITING(c_p)) {
Export *exp;
BifFunction dbif, ibif;
+ BeamInstr call_instr;
BeamInstr *pc;
/*
@@ -5163,27 +5174,40 @@ erts_schedule_bif(Process *proc,
if (i == NULL) {
ERTS_INTERNAL_ERROR("Missing instruction pointer");
}
+
+ if (BeamIsOpCode(*i, op_i_generic_breakpoint)) {
+ ErtsCodeInfo *ci;
+ GenericBp *bp;
+
+ ci = erts_code_to_codeinfo(i);
+ bp = ci->u.gen_bp;
+
+ call_instr = bp->orig_instr;
+ } else {
+ call_instr = *i;
+ }
+
#ifdef HIPE
- else if (proc->flags & F_HIPE_MODE) {
+ if (proc->flags & F_HIPE_MODE) {
/* Pointer to bif export in i */
exp = (Export *) i;
pc = cp_val(c_p->stop[0]);
mfa = &exp->info.mfa;
- }
+ } else /* !! This is part of the if clause below !! */
#endif
- else if (BeamIsOpCode(*i, op_call_bif_e)) {
- /* Pointer to bif export in i+1 */
- exp = (Export *) i[1];
+ if (BeamIsOpCode(call_instr, op_call_light_bif_be)) {
+ /* Pointer to bif export in i+2 */
+ exp = (Export *) i[2];
pc = i;
mfa = &exp->info.mfa;
}
- else if (BeamIsOpCode(*i, op_call_bif_only_e)) {
- /* Pointer to bif export in i+1 */
- exp = (Export *) i[1];
+ else if (BeamIsOpCode(call_instr, op_call_light_bif_only_be)) {
+ /* Pointer to bif export in i+2 */
+ exp = (Export *) i[2];
pc = i;
mfa = &exp->info.mfa;
}
- else if (BeamIsOpCode(*i, op_apply_bif)) {
+ else if (BeamIsOpCode(call_instr, op_apply_bif)) {
pc = cp_val(c_p->stop[0]);
mfa = erts_code_to_codemfa(i);
}
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index c9f5177bd3..19dabc0514 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -25,13 +25,14 @@
#
# <bif-decl> ::= "bif" <bif> <C-name>* |
# "ubif" <bif> <C-name>* |
-# "gcbif" <bif> <C-name>*
+# "hbif" <bif> <C-name>*
# <bif> ::= <module> ":" <name> "/" <arity>
#
-# ubif: Use for operators and guard BIFs that never build anything
-# on the heap (such as tuple_size/1) and operators.
+# ubif: Use for operators and guard BIFs.
#
-# gcbif: Use for guard BIFs that may build on the heap (such as abs/1).
+# hbif: Use for BIFs that perform garbage collection or need up-to-date
+# information on where they were called from. These must be called
+# through the export entry.
#
# bif: Use for all other BIFs.
#
@@ -60,7 +61,7 @@ bif erlang:display_string/1
bif erlang:display_nl/0
ubif erlang:element/2
bif erlang:erase/0
-bif erlang:erase/1
+hbif erlang:erase/1
bif erlang:exit/1
bif erlang:exit/2
bif erlang:exit_signal/2
@@ -70,7 +71,7 @@ ubif erlang:float/1
bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
-bif erts_internal:garbage_collect/1
+hbif erts_internal:garbage_collect/1
bif erlang:get/0
bif erlang:get/1
bif erlang:get_keys/1
@@ -127,10 +128,10 @@ bif erlang:ports/0
bif erlang:pre_loaded/0
bif erlang:process_flag/2
bif erts_internal:process_flag/3
-bif erlang:process_info/1
-bif erlang:process_info/2
+hbif erlang:process_info/1
+hbif erlang:process_info/2
bif erlang:processes/0
-bif erlang:put/2
+hbif erlang:put/2
bif erlang:register/2
bif erlang:registered/0
ubif erlang:round/1
@@ -174,7 +175,7 @@ bif erts_internal:port_connect/2
bif erts_internal:request_system_task/3
bif erts_internal:request_system_task/4
-bif erts_internal:check_process_code/1
+hbif erts_internal:check_process_code/1
bif erts_internal:map_to_tuple_keys/1
bif erts_internal:term_type/1
@@ -466,7 +467,7 @@ bif code:is_module_native/1
# New Bifs in R9C.
#
-bif erlang:hibernate/3
+hbif erlang:hibernate/3
bif error_logger:warning_map/0
#
diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab
index c02dcd543c..04d36b721c 100644
--- a/erts/emulator/beam/bif_instrs.tab
+++ b/erts/emulator/beam/bif_instrs.tab
@@ -212,26 +212,32 @@ i_length.execute(Fail, Live, Dst) {
// Call a BIF, store the result in x(0) and transfer control to the
// next instruction.
//
-call_bif(Exp) {
+call_light_bif(Bif, Exp) {
+ Export *export;
ErtsBifFunc bf;
+
Eterm result;
ErlHeapFragment *live_hf_end;
- Export *export = (Export*) $Exp;
+
+ bf = (ErtsBifFunc) $Bif;
+ export = (Export*) $Exp;
if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
/*
* If we have run out of reductions, do a context
* switch before calling the BIF.
*/
- c_p->arity = GET_BIF_ARITY(export);
+ c_p->arity = GET_EXPORT_ARITY(export);
c_p->current = &export->info.mfa;
goto context_switch3;
}
- ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(export),
- GET_BIF_ADDRESS(export));
+ if (ERTS_UNLIKELY(export->is_bif_traced)) {
+ $SAVE_CONTINUATION_POINTER($NEXT_INSTRUCTION);
+ $DISPATCH_EXPORT(export);
+ }
- bf = GET_BIF_ADDRESS(export);
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_EXPORT_MODULE(export), bf);
PRE_BIF_SWAPOUT(c_p);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
@@ -243,21 +249,26 @@ call_bif(Exp) {
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
live_hf_end = c_p->mbuf;
ERTS_CHK_MBUF_SZ(c_p);
+
result = (*bf)(c_p, reg, I);
+
+ /* Only heavy BIFs may GC. */
+ ASSERT(E == c_p->stop);
+
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_HOLE_CHECK(c_p);
ERTS_REQ_PROC_MAIN_LOCK(c_p);
if (ERTS_IS_GC_DESIRED(c_p)) {
- Uint arity = GET_BIF_ARITY(export);
+ Uint arity = GET_EXPORT_ARITY(export);
result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result,
reg, arity);
E = c_p->stop;
}
- PROCESS_MAIN_CHK_LOCKS(c_p);
HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
/*
@@ -282,7 +293,6 @@ call_bif(Exp) {
*/
$SAVE_CONTINUATION_POINTER($NEXT_INSTRUCTION);
SET_I(c_p->i);
- SWAPIN;
$DISPATCH();
}
@@ -299,27 +309,36 @@ call_bif(Exp) {
// Call a BIF tail-recursively, storing the result in x(0) and doing
// a return to the continuation poiner.
//
-
-call_bif_only(Exp) {
+call_light_bif_only(Bif, Exp) {
+ ErlHeapFragment *live_hf_end;
ErtsBifFunc bf;
+ Export *export;
Eterm result;
- ErlHeapFragment *live_hf_end;
- Export *export = (Export*) $Exp;
+
+ bf = (ErtsBifFunc) $Bif;
+ export = (Export*) $Exp;
if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
/*
* If we have run out of reductions, do a context
* switch before calling the BIF.
*/
- c_p->arity = GET_BIF_ARITY(export);
+ c_p->arity = GET_EXPORT_ARITY(export);
c_p->current = &export->info.mfa;
goto context_switch3;
}
- ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(export),
- GET_BIF_ADDRESS(export));
+ if (ERTS_UNLIKELY(export->is_bif_traced)) {
+ /* Set up a dummy stack frame so we can perform a normal call. Loader
+ * transformations ensure that the next instruction after this is
+ * 'deallocate_return 0'. */
+ $AH(0, 0, GET_EXPORT_ARITY(export));
- bf = GET_BIF_ADDRESS(export);
+ $SAVE_CONTINUATION_POINTER($NEXT_INSTRUCTION);
+ $DISPATCH_EXPORT(export);
+ }
+
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_EXPORT_MODULE(export), bf);
PRE_BIF_SWAPOUT(c_p);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
@@ -331,21 +350,26 @@ call_bif_only(Exp) {
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
live_hf_end = c_p->mbuf;
ERTS_CHK_MBUF_SZ(c_p);
+
result = (*bf)(c_p, reg, I);
+
+ /* Only heavy BIFs may GC. */
+ ASSERT(E == c_p->stop);
+
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_HOLE_CHECK(c_p);
ERTS_REQ_PROC_MAIN_LOCK(c_p);
if (ERTS_IS_GC_DESIRED(c_p)) {
- Uint arity = GET_BIF_ARITY(export);
+ Uint arity = GET_EXPORT_ARITY(export);
result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result,
reg, arity);
E = c_p->stop;
}
- PROCESS_MAIN_CHK_LOCKS(c_p);
HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
/*
@@ -370,7 +394,6 @@ call_bif_only(Exp) {
* to the continuation pointer on the stack will be done.
*/
SET_I(c_p->i);
- SWAPIN;
$DISPATCH();
}
@@ -445,7 +468,7 @@ nif_bif.call_nif() {
* I[2]: Pointer to erl_module_nif
* I[3]: Function pointer to dirty NIF
*
- * This layout is determined by the NifExport struct
+ * This layout is determined by the ErtsNativeFunc struct
*/
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
@@ -497,8 +520,8 @@ nif_bif.call_nif() {
nif_bif.apply_bif() {
/*
- * At this point, I points to the code[0] in the export entry for
- * the BIF:
+ * At this point, I points to the code[0] in the native function wrapper
+ * for the BIF:
*
* code[-3]: Module
* code[-2]: Function
@@ -515,21 +538,19 @@ nif_bif.apply_bif() {
codemfa = erts_code_to_codemfa(I);
- ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0));
-
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)I[1]);
/* In case we apply process_info/1,2 or load_nif/1 */
c_p->current = codemfa;
$SET_CP_I_ABS(I); /* In case we apply check_process_code/2. */
c_p->arity = 0; /* To allow garbage collection on ourselves
- * (check_process_code/2).
- */
+ * (check_process_code/2, put/2, etc). */
DTRACE_BIF_ENTRY(c_p, codemfa);
SWAPOUT;
ERTS_DBG_CHK_REDS(c_p, FCALLS - 1);
c_p->fcalls = FCALLS - 1;
- vbf = (BifFunction) Arg(0);
+ vbf = (BifFunction) I[1];
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = codemfa->arity;
ASSERT(bif_nif_arity <= 4);
@@ -582,9 +603,39 @@ nif_bif.epilogue() {
$DISPATCH();
}
{
- BeamInstr *cp = cp_val(*E);
+ BeamInstr *cp = printable_return_address(c_p, E);
ASSERT(VALID_INSTR(*cp));
I = handle_error(c_p, cp, reg, c_p->current);
}
goto post_error_handling;
}
+
+i_load_nif() {
+ //| -no_next
+ if (erts_try_seize_code_write_permission(c_p)) {
+ Eterm result;
+
+ PRE_BIF_SWAPOUT(c_p);
+ result = erts_load_nif(c_p, I, r(0), r(1));
+ erts_release_code_write_permission();
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN;
+
+ if (ERTS_LIKELY(is_value(result))) {
+ r(0) = result;
+ $NEXT0();
+ } else {
+ static ErtsCodeMFA mfa = {am_erlang, am_load_nif, 2};
+ c_p->freason = BADARG;
+ I = handle_error(c_p, I, reg, &mfa);
+ goto post_error_handling;
+ }
+ } else {
+ /* Yield and try again */
+ $SET_CP_I_ABS(I);
+ SWAPOUT;
+ c_p->current = NULL;
+ c_p->arity = 2;
+ goto do_schedule;
+ }
+}
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index b9f0334172..1bbc7d7f1e 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -653,8 +653,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
= erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_EXP_TRACE)]
- = sizeof(NifExportTrace);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
= sizeof(ErtsNSchedMagicRefTableEntry);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
@@ -2392,10 +2390,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_BIF_TIMER);
- add_fix_values(&size.processes,
- &size.processes_used,
- fi,
- ERTS_ALC_T_NIF_EXP_TRACE);
}
if (want.atom || want.atom_used) {
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 349977ebe7..cd978a8d57 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -332,7 +332,6 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
-type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
type EXPORT LONG_LIVED CODE export_entry
type MONITOR FIXED_SIZE PROCESSES monitor
type MONITOR_SUSPEND STANDARD PROCESSES monitor_suspend
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 8844012e37..946fefa14f 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -158,8 +158,9 @@ static Eterm os_version_tuple;
static Eterm
current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
int full_info, Uint reserve_size, int flags);
-static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
- Uint reserve_size);
+static Eterm
+current_stacktrace(Process* p, ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size, int flags);
Eterm
erts_bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh, Eterm tail)
@@ -1383,7 +1384,7 @@ process_info_aux(Process *c_p,
break;
case ERTS_PI_IX_CURRENT_STACKTRACE:
- res = current_stacktrace(hfact, rp, reserve_size);
+ res = current_stacktrace(c_p, hfact, rp, reserve_size, flags);
break;
case ERTS_PI_IX_INITIAL_CALL:
@@ -2021,21 +2022,44 @@ current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
}
if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
- FunctionInfo fi2;
- BeamInstr* continuation_ptr;
+ BeamInstr* return_address;
+ FunctionInfo caller_fi;
+ Eterm *ptr;
- /*
- * The current function is erlang:process_info/{1,2},
- * which is not the answer that the application want.
- * We will use the continuation pointer stored at the
- * top of the stack instead.
- */
- continuation_ptr = (BeamInstr *) rp->stop[0];
- erts_lookup_function_info(&fi2, continuation_ptr, full_info);
- if (fi2.mfa) {
- fi = fi2;
- rp->current = fi2.mfa;
- }
+ /*
+ * The current function is erlang:process_info/{1,2}, and we've
+ * historically returned the *calling* function in that case. We
+ * therefore use the continuation pointer stored at the top of the
+ * stack instead, which is safe since process_info is a "heavy" BIF
+ * that is only called through its export entry.
+ */
+
+ return_address = NULL;
+ ptr = STACK_TOP(rp);
+ ASSERT(is_CP(*ptr));
+
+ while (ptr < STACK_START(rp)) {
+ BeamInstr *cp = cp_val(*ptr);
+
+ if (*cp == BeamOpCodeAddr(op_return_trace)) {
+ ptr += 3;
+ } else if (*cp == BeamOpCodeAddr(op_i_return_time_trace)) {
+ ptr += 2;
+ } else if (*cp == BeamOpCodeAddr(op_i_return_to_trace)) {
+ ptr += 1;
+ } else {
+ return_address = cp;
+ break;
+ }
+ }
+
+ ASSERT(return_address != NULL);
+
+ erts_lookup_function_info(&caller_fi, return_address, full_info);
+ if (caller_fi.mfa) {
+ fi = caller_fi;
+ rp->current = caller_fi.mfa;
+ }
}
/*
@@ -2056,8 +2080,8 @@ current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
}
static Eterm
-current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
- Uint reserve_size)
+current_stacktrace(Process *p, ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size, int flags)
{
Uint sz;
struct StackTrace* s;
@@ -2074,9 +2098,14 @@ current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (depth > 0 && rp->i) {
- s->trace[s->depth++] = rp->i;
- depth--;
+ s->pc = NULL;
+
+ /* We skip current pc when requesting our own stack trace since it will
+ * inevitably point to process_info/1,2 */
+ if ((p != rp || (flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) &&
+ depth > 0 && rp->i) {
+ s->trace[s->depth++] = rp->i;
+ depth--;
}
erts_save_stacktrace(rp, s, depth);
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 6fed112627..e03c97fe10 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -80,9 +80,6 @@ static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
static Eterm trace_info_event(Process* p, Eterm event, Eterm key);
-
-static void reset_bif_trace(void);
-static void setup_bif_trace(void);
static void install_exp_breakpoints(BpFunctions* f);
static void uninstall_exp_breakpoints(BpFunctions* f);
static void clean_export_entries(BpFunctions* f);
@@ -1053,8 +1050,7 @@ static int function_is_traced(Process *p,
int r = 0;
- ASSERT(BeamIsOpCode(*pc, op_apply_bif) ||
- BeamIsOpCode(*pc, op_i_generic_breakpoint));
+ ASSERT(BeamIsOpCode(*pc, op_i_generic_breakpoint));
if (erts_is_trace_break(&ep->info, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
@@ -1426,18 +1422,21 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
int n;
BpFunction* fp;
- /*
- * First work on normal functions (not real BIFs).
- */
-
erts_bp_match_export(&finish_bp.e, mfa, specified);
fp = finish_bp.e.matching;
n = finish_bp.e.matched;
for (i = 0; i < n; i++) {
ErtsCodeInfo *ci = fp[i].ci;
- BeamInstr* pc = erts_codeinfo_to_code(ci);
- Export* ep = ErtsContainerStruct(ci, Export, info);
+ BeamInstr* pc;
+ Export* ep;
+
+ pc = erts_codeinfo_to_code(ci);
+ ep = ErtsContainerStruct(ci, Export, info);
+
+ if (ep->bif_table_index != -1) {
+ ep->is_bif_traced = !!on;
+ }
if (on && !flags.breakpoint) {
/* Turn on global call tracing */
@@ -1449,7 +1448,7 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
ep->trampoline.op = BeamOpCodeAddr(op_trace_jump_W);
ep->trampoline.trace.address = (BeamInstr) ep->addressv[code_ix];
}
- erts_set_call_trace_bif(ci, match_prog_set, 0);
+ erts_set_export_trace(ci, match_prog_set, 0);
if (ep->addressv[code_ix] != pc) {
ep->trampoline.op = BeamOpCodeAddr(op_i_generic_breakpoint);
}
@@ -1460,7 +1459,7 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
* Turn off global tracing, either explicitly or implicitly
* before turning on breakpoint tracing.
*/
- erts_clear_call_trace_bif(ci, 0);
+ erts_clear_export_trace(ci, 0);
if (BeamIsOpCode(ep->trampoline.op, op_i_generic_breakpoint)) {
ep->trampoline.op = BeamOpCodeAddr(op_trace_jump_W);
}
@@ -1468,83 +1467,6 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
}
/*
- ** OK, now for the bif's
- */
- for (i = 0; i < BIF_SIZE; ++i) {
- Export *ep = bif_export[i];
-
- if (!ExportIsBuiltIn(ep)) {
- continue;
- }
-
- if (bif_table[i].f == bif_table[i].traced) {
- /* Trace wrapper same as regular function - untraceable */
- continue;
- }
-
- switch (specified) {
- case 3:
- if (mfa->arity != ep->info.mfa.arity)
- continue;
- case 2:
- if (mfa->function != ep->info.mfa.function)
- continue;
- case 1:
- if (mfa->module != ep->info.mfa.module)
- continue;
- case 0:
- break;
- default:
- ASSERT(0);
- }
-
- if (! flags.breakpoint) { /* Export entry call trace */
- if (on) {
- erts_clear_call_trace_bif(&ep->info, 1);
- erts_clear_mtrace_bif(&ep->info);
- erts_set_call_trace_bif(&ep->info, match_prog_set, 0);
- } else { /* off */
- erts_clear_call_trace_bif(&ep->info, 0);
- }
- matches++;
- } else { /* Breakpoint call trace */
- int m = 0;
-
- if (on) {
- if (flags.local) {
- erts_clear_call_trace_bif(&ep->info, 0);
- erts_set_call_trace_bif(&ep->info, match_prog_set, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_set_mtrace_bif(&ep->info, meta_match_prog_set,
- meta_tracer);
- m = 1;
- }
- if (flags.call_time) {
- erts_set_time_trace_bif(&ep->info, on);
- /* I don't want to remove any other tracers */
- m = 1;
- }
- } else { /* off */
- if (flags.local) {
- erts_clear_call_trace_bif(&ep->info, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_clear_mtrace_bif(&ep->info);
- m = 1;
- }
- if (flags.call_time) {
- erts_clear_time_trace_bif(&ep->info);
- m = 1;
- }
- }
- matches += m;
- }
- }
-
- /*
** So, now for breakpoint tracing
*/
erts_bp_match_functions(&finish_bp.f, mfa, specified);
@@ -1670,7 +1592,6 @@ erts_finish_breakpointing(void)
install_exp_breakpoints(&finish_bp.e);
}
}
- setup_bif_trace();
return 1;
case 1:
/*
@@ -1699,7 +1620,6 @@ erts_finish_breakpointing(void)
uninstall_exp_breakpoints(&finish_bp.e);
}
}
- reset_bif_trace();
return 1;
case 3:
/*
@@ -1710,7 +1630,6 @@ erts_finish_breakpointing(void)
* updated). If any breakpoints have been totally disabled,
* deallocate the GenericBp structs for them.
*/
- erts_consolidate_bif_bp_data();
clean_export_entries(&finish_bp.e);
erts_consolidate_bp_data(&finish_bp.e, 0);
erts_consolidate_bp_data(&finish_bp.f, 1);
@@ -1782,41 +1701,6 @@ clean_export_entries(BpFunctions* f)
}
}
-static void
-setup_bif_trace(void)
-{
- int i;
-
- for (i = 0; i < BIF_SIZE; ++i) {
- Export *ep = bif_export[i];
- GenericBp* g = ep->info.u.gen_bp;
- if (g) {
- if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->trampoline.bif.func != 0);
- ep->trampoline.bif.func = (BeamInstr) bif_table[i].traced;
- }
- }
- }
-}
-
-static void
-reset_bif_trace(void)
-{
- int i;
- ErtsBpIndex active = erts_active_bp_ix();
-
- for (i = 0; i < BIF_SIZE; ++i) {
- Export *ep = bif_export[i];
- GenericBp* g = ep->info.u.gen_bp;
- if (g && g->data[active].flags == 0) {
- if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->trampoline.bif.func != 0);
- ep->trampoline.bif.func = (BeamInstr) bif_table[i].f;
- }
- }
- }
-}
-
/*
* Sequential tracing
*
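With the BIF-specific wrapper loop and the setup/reset helpers above gone, enabling global call tracing for a BIF reduces to flipping a flag on the matched export entries. Below is a minimal stand-alone model of that step; the struct and function names are illustrative only, the real code is the erts_set_trace_pattern() hunk above.

    #include <stdio.h>

    /* Toy export entry carrying the two fields this commit introduces. */
    struct toy_export {
        int bif_table_index;   /* index into bif_table[], or -1 if not a BIF */
        int is_bif_traced;     /* non-zero while the BIF is globally traced  */
    };

    /* Mirrors the new loop in erts_set_trace_pattern(): every matched export
     * is handled uniformly, and only BIF exports get the extra flag. */
    static void set_trace_on_matches(struct toy_export *eps, int n, int on)
    {
        int i;
        for (i = 0; i < n; i++) {
            if (eps[i].bif_table_index != -1)
                eps[i].is_bif_traced = !!on;
            /* breakpoint/trampoline handling is the same for BIFs and
             * ordinary functions, so nothing else is BIF-specific here */
        }
    }

    int main(void)
    {
        struct toy_export exports[] = { { 42, 0 }, { -1, 0 } };
        set_trace_on_matches(exports, 2, 1);
        printf("%d %d\n", exports[0].is_bif_traced, exports[1].is_bif_traced);
        return 0;
    }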
diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c
index fc9ec4e9f5..0b9e54dfc5 100644
--- a/erts/emulator/beam/erl_nfunc_sched.c
+++ b/erts/emulator/beam/erl_nfunc_sched.c
@@ -33,22 +33,16 @@
NifExport *
erts_new_proc_nif_export(Process *c_p, int argc)
{
- size_t size;
- int i;
NifExport *nep, *old_nep;
+ size_t size;
size = sizeof(NifExport) + (argc-1)*sizeof(Eterm);
nep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, size);
- for (i = 0; i < ERTS_NUM_CODE_IX; i++)
- nep->exp.addressv[i] = &nep->exp.trampoline.raw[0];
-
nep->argc = -1; /* unused marker */
nep->argv_size = argc;
- nep->trace = NULL;
old_nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(c_p, nep);
if (old_nep) {
- ASSERT(!nep->trace);
erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, old_nep);
}
return nep;
@@ -65,39 +59,6 @@ erts_destroy_nif_export(Process *p)
}
}
-void
-erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
- Export* ep, Uint32 flags,
- Uint32 flags_meta, BeamInstr* I,
- ErtsTracer meta_tracer)
-{
- NifExportTrace *netp;
- ASSERT(nep && nep->argc >= 0);
- ASSERT(!nep->trace);
- netp = erts_alloc(ERTS_ALC_T_NIF_EXP_TRACE,
- sizeof(NifExportTrace));
- netp->applying = applying;
- netp->ep = ep;
- netp->flags = flags;
- netp->flags_meta = flags_meta;
- netp->I = I;
- netp->meta_tracer = NIL;
- erts_tracer_update(&netp->meta_tracer, meta_tracer);
- nep->trace = netp;
-}
-
-void
-erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep)
-{
- NifExportTrace *netp = nep->trace;
- nep->trace = NULL;
- erts_bif_trace_epilogue(c_p, result, netp->applying, netp->ep,
- netp->flags, netp->flags_meta,
- netp->I, netp->meta_tracer);
- erts_tracer_update(&netp->meta_tracer, NIL);
- erts_free(ERTS_ALC_T_NIF_EXP_TRACE, netp);
-}
-
NifExport *
erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
ErtsCodeMFA *mfa, BeamInstr *pc,
@@ -165,14 +126,14 @@ erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
reg[i] = argv[i];
}
ASSERT(is_atom(mod) && is_atom(func));
- nep->exp.info.mfa.module = mod;
- nep->exp.info.mfa.function = func;
- nep->exp.info.mfa.arity = (Uint) argc;
- nep->exp.trampoline.op = (BeamInstr) instr; /* call_nif || apply_bif */
- nep->exp.trampoline.raw[1] = (BeamInstr) dfunc;
+ nep->trampoline.info.mfa.module = mod;
+ nep->trampoline.info.mfa.function = func;
+ nep->trampoline.info.mfa.arity = (Uint) argc;
+ nep->trampoline.call_op = (BeamInstr) instr; /* call_nif || apply_bif */
+ nep->trampoline.dfunc = (BeamInstr) dfunc;
nep->func = ifunc;
used_proc->arity = argc;
used_proc->freason = TRAP;
- used_proc->i = (BeamInstr*) nep->exp.addressv[0];
+ used_proc->i = (BeamInstr*)&nep->trampoline.call_op;
return nep;
}
diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
index 54d011695a..033ba58ded 100644
--- a/erts/emulator/beam/erl_nfunc_sched.h
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -25,15 +25,6 @@
#include "bif.h"
#include "error.h"
-typedef struct {
- int applying;
- Export* ep;
- Uint32 flags;
- Uint32 flags_meta;
- BeamInstr* I;
- ErtsTracer meta_tracer;
-} NifExportTrace;
-
/*
* NIF exports need a few more items than the Export struct provides,
* including the erl_module_nif* and a NIF function pointer, so the
@@ -45,11 +36,15 @@ typedef struct {
*/
typedef struct {
- Export exp;
+ struct {
+ ErtsCodeInfo info;
+ BeamInstr call_op; /* call_nif || apply_bif */
+ BeamInstr dfunc;
+ } trampoline;
+
struct erl_module_nif* m; /* NIF module, or NULL if BIF */
void *func; /* Indirect NIF or BIF to execute (may be unused) */
ErtsCodeMFA *current;/* Current as set when originally called */
- NifExportTrace *trace;
/* --- The following is only used on error --- */
BeamInstr *pc; /* Program counter */
ErtsCodeMFA *mfa; /* MFA of original call */
@@ -59,11 +54,6 @@ typedef struct {
} NifExport;
NifExport *erts_new_proc_nif_export(Process *c_p, int argc);
-void erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
- Export* ep, Uint32 flags,
- Uint32 flags_meta, BeamInstr* I,
- ErtsTracer meta_tracer);
-void erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep);
void erts_destroy_nif_export(Process *p);
NifExport *erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
ErtsCodeMFA *mfa, BeamInstr *pc,
@@ -81,11 +71,6 @@ ERTS_GLB_INLINE void erts_nif_export_restore(Process *c_p, NifExport *ep,
Eterm result);
ERTS_GLB_INLINE void erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
Eterm *reg, ErtsCodeMFA **nif_mfa);
-ERTS_GLB_INLINE int erts_nif_export_check_save_trace(Process *c_p, Eterm result,
- int applying, Export* ep,
- Uint32 flags,
- Uint32 flags_meta, BeamInstr* I,
- ErtsTracer meta_tracer);
ERTS_GLB_INLINE Process *erts_proc_shadow2real(Process *c_p);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -147,8 +132,6 @@ erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result)
c_p->current = ep->current;
ep->argc = -1; /* Unused nif-export marker... */
- if (ep->trace)
- erts_nif_export_restore_trace(c_p, result, ep);
}
ERTS_GLB_INLINE void
@@ -166,25 +149,6 @@ erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
erts_nif_export_restore(c_p, nep, THE_NON_VALUE);
}
-ERTS_GLB_INLINE int
-erts_nif_export_check_save_trace(Process *c_p, Eterm result,
- int applying, Export* ep,
- Uint32 flags,
- Uint32 flags_meta, BeamInstr* I,
- ErtsTracer meta_tracer)
-{
- if (is_non_value(result) && c_p->freason == TRAP) {
- NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
- if (nep && nep->argc >= 0) {
- erts_nif_export_save_trace(c_p, nep, applying, ep,
- flags, flags_meta,
- I, meta_tracer);
- return 1;
- }
- }
- return 0;
-}
-
ERTS_GLB_INLINE Process *
erts_proc_shadow2real(Process *c_p)
{
@@ -208,7 +172,7 @@ erts_proc_shadow2real(Process *c_p)
#define ERTS_I_BEAM_OP_TO_NIF_EXPORT(I) \
(ASSERT(BeamIsOpCode(*(I), op_apply_bif) || \
BeamIsOpCode(*(I), op_call_nif)), \
- ((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.trampoline.raw[0]))))
+ ((NifExport *) (((char *) (I)) - offsetof(NifExport, trampoline.call_op))))
#include "erl_message.h"
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 46f7e864fd..5a5ea07d89 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -2944,7 +2944,7 @@ static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
* parts (located in code).
*/
- ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+ ep = ErtsContainerStruct(proc->current, NifExport, trampoline.info.mfa);
mod = proc->current->module;
func = proc->current->function;
fp = (NativeFunPtr) ep->func;
@@ -2988,7 +2988,7 @@ execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
execution_state(env, &proc, NULL);
- ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+ ep = ErtsContainerStruct(proc->current, NifExport, trampoline.info.mfa);
fp = ep->func;
ASSERT(ep);
ASSERT(!env->exception_thrown);
@@ -4117,7 +4117,23 @@ static struct erl_module_nif* create_lib(const ErlNifEntry* src)
return lib;
};
-BIF_RETTYPE load_nif_2(BIF_ALIST_2)
+/* load_nif/2 is implemented as an instruction as it needs to know where it
+ * was called from, and it's a pain to get that information in a BIF.
+ *
+ * This is a small stub that rejects apply(erlang, load_nif, [Path, Args]). */
+BIF_RETTYPE load_nif_2(BIF_ALIST_2) {
+ if (BIF_P->flags & F_HIPE_MODE) {
+ BIF_RET(load_nif_error(BIF_P, "notsup",
+ "Calling load_nif from HiPE compiled modules "
+ "not supported"));
+ }
+
+ BIF_RET(load_nif_error(BIF_P, "bad_lib",
+ "load_nif/2 must be explicitly called from the NIF "
+ "module. It cannot be called through apply/3."));
+}
+
+Eterm erts_load_nif(Process *c_p, BeamInstr *I, Eterm filename, Eterm args)
{
static const char bad_lib[] = "bad_lib";
static const char upgrade[] = "upgrade";
@@ -4139,13 +4155,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
struct erl_module_nif* lib = NULL;
struct erl_module_instance* this_mi;
struct erl_module_instance* prev_mi;
- BeamInstr* caller_cp;
-
- if (BIF_P->flags & F_HIPE_MODE) {
- ret = load_nif_error(BIF_P, "notsup", "Calling load_nif from HiPE compiled "
- "modules not supported");
- BIF_RET(ret);
- }
encoding = erts_get_native_filename_encoding();
if (encoding == ERL_FILENAME_WIN_WCHAR) {
@@ -4153,30 +4162,19 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/* since lib_name is used in error messages */
encoding = ERL_FILENAME_UTF8;
}
- lib_name = erts_convert_filename_to_encoding(BIF_ARG_1, NULL, 0,
+ lib_name = erts_convert_filename_to_encoding(filename, NULL, 0,
ERTS_ALC_T_TMP, 1, 0, encoding,
NULL, 0);
if (!lib_name) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (!erts_try_seize_code_write_permission(BIF_P)) {
- erts_free(ERTS_ALC_T_TMP, lib_name);
- ERTS_BIF_YIELD2(bif_export[BIF_load_nif_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
+ return THE_NON_VALUE;
}
/* Block system (is this the right place to do it?) */
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
erts_thr_progress_block();
/* Find calling module */
- ASSERT(BIF_P->current != NULL);
- ASSERT(BIF_P->current->module == am_erlang
- && BIF_P->current->function == am_load_nif
- && BIF_P->current->arity == 2);
- caller_cp = cp_val(BIF_P->stop[0]);
- caller = find_function_from_pc(caller_cp);
+ caller = find_function_from_pc(I);
ASSERT(caller != NULL);
mod_atom = caller->module;
ASSERT(is_atom(mod_atom));
@@ -4196,7 +4194,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
this_mi = &module_p->curr;
prev_mi = &module_p->old;
if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) {
- ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
+ ret = load_nif_error(c_p, "old_code", "Calling load_nif from old "
"module '%T' not allowed", mod_atom);
goto error;
} else if (module_p->on_load) {
@@ -4210,52 +4208,52 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
if (this_mi->nif != NULL) {
- ret = load_nif_error(BIF_P,"reload","NIF library already loaded"
+ ret = load_nif_error(c_p,"reload","NIF library already loaded"
" (reload disallowed since OTP 20).");
}
else if (init_func == NULL &&
(err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
const char slogan[] = "Failed to load NIF library";
if (strstr(errdesc.str, lib_name) != NULL) {
- ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
+ ret = load_nif_error(c_p, "load_failed", "%s: '%s'", slogan, errdesc.str);
}
else {
- ret = load_nif_error(BIF_P, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str);
+ ret = load_nif_error(c_p, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str);
}
}
else if (init_func == NULL &&
erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) {
- ret = load_nif_error(BIF_P, bad_lib, "Failed to find library init"
+ ret = load_nif_error(c_p, bad_lib, "Failed to find library init"
" function: '%s'", errdesc.str);
}
else if ((taint ? erts_add_taint(mod_atom) : 0,
(entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
- ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
+ ret = load_nif_error(c_p, bad_lib, "Library init-call unsuccessful");
}
else if (entry->major > ERL_NIF_MAJOR_VERSION
|| (entry->major == ERL_NIF_MAJOR_VERSION
&& entry->minor > ERL_NIF_MINOR_VERSION)) {
char* fmt = "That '%T' NIF library needs %s or newer. Either try to"
" recompile the NIF lib or use a newer erts runtime.";
- ret = load_nif_error(BIF_P, bad_lib, fmt, mod_atom, entry->min_erts);
+ ret = load_nif_error(c_p, bad_lib, fmt, mod_atom, entry->min_erts);
}
else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
|| (entry->major==2 && entry->minor == 5)) { /* experimental maps */
char* fmt = "That old NIF library (%d.%d) is not compatible with this "
"erts runtime (%d.%d). Try recompile the NIF lib.";
- ret = load_nif_error(BIF_P, bad_lib, fmt, entry->major, entry->minor,
+ ret = load_nif_error(c_p, bad_lib, fmt, entry->major, entry->minor,
ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
}
else if (AT_LEAST_VERSION(entry, 2, 1)
&& sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
- ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for "
+ ret = load_nif_error(c_p, bad_lib, "Library (%s) not compiled for "
"this vm variant (%s).",
entry->vm_variant, ERL_NIF_VM_VARIANT);
}
else if (!erts_is_atom_str((char*)entry->name, mod_atom, 1)) {
- ret = load_nif_error(BIF_P, bad_lib, "Library module name '%s' does not"
+ ret = load_nif_error(c_p, bad_lib, "Library module name '%s' does not"
" match calling module '%T'", entry->name, mod_atom);
}
else {
@@ -4274,7 +4272,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
|| (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
- ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
+ ret = load_nif_error(c_p,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
}
else if (f->flags) {
@@ -4286,16 +4284,13 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
* a load error.
*/
if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
- ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
+ ret = load_nif_error(c_p, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
f->flags, mod_atom, f->name, f->arity);
}
- else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
- < BEAM_NIF_MIN_FUNC_SZ)
- {
- ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
- " in module (%T:%s/%u too small)",
- mod_atom, f->name, f->arity);
- }
+
+ ASSERT(erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
+ >= BEAM_NATIVE_MIN_FUNC_SZ);
+
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
mod_atom, f->name, f->arity);*/
}
@@ -4314,23 +4309,23 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
void* prev_old_data = prev_mi->nif->priv_data;
if (entry->upgrade == NULL) {
- ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
+ ret = load_nif_error(c_p, upgrade, "Upgrade not supported by this NIF library.");
goto error;
}
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
+ erts_pre_nif(&env, c_p, lib, NULL);
+ veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, args);
erts_post_nif(&env);
if (veto) {
prev_mi->nif->priv_data = prev_old_data;
- ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
+ ret = load_nif_error(c_p, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
}
}
else if (entry->load != NULL) { /********* Initial load ***********/
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
+ erts_pre_nif(&env, c_p, lib, NULL);
+ veto = entry->load(&env, &lib->priv_data, args);
erts_post_nif(&env);
if (veto) {
- ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
+ ret = load_nif_error(c_p, "load", "Library load-call unsuccessful (%d).", veto);
}
}
if (ret == am_ok) {
@@ -4384,8 +4379,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
erts_thr_progress_unblock();
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_release_code_write_permission();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
erts_free(ERTS_ALC_T_TMP, lib_name);
BIF_RET(ret);
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index f564549ab9..c0f31e0cb6 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -142,12 +142,6 @@ void monitor_generic(Process *p, Eterm type, Eterm spec);
Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp);
-Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
-Eterm
-erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
- Export* ep, Uint32 flags,
- Uint32 flags_meta, BeamInstr* I,
- ErtsTracer meta_tracer);
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
#define ERTS_CHK_PEND_TRACE_MSGS(ESDP) \
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 24957c8131..ca16bfd20e 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -129,6 +129,8 @@ export_alloc(struct export_entry* tmpl_e)
obj->info.mfa.module = tmpl->info.mfa.module;
obj->info.mfa.function = tmpl->info.mfa.function;
obj->info.mfa.arity = tmpl->info.mfa.arity;
+ obj->bif_table_index = -1;
+ obj->is_bif_traced = 0;
memset(&obj->trampoline, 0, sizeof(obj->trampoline));
@@ -205,6 +207,8 @@ static struct export_entry* init_template(struct export_templ* templ,
templ->exp.info.mfa.module = m;
templ->exp.info.mfa.function = f;
templ->exp.info.mfa.arity = a;
+ templ->exp.bif_table_index = -1;
+ templ->exp.is_bif_traced = 0;
return &templ->entry;
}
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 1246446418..0190624f79 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -31,7 +31,13 @@
typedef struct export
{
- void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
+ /* Pointer to code for function. */
+ void* addressv[ERTS_NUM_CODE_IX];
+
+ /* Index into bif_table[], or -1 if not a BIF. */
+ int bif_table_index;
+ /* Non-zero if this is a BIF that's traced. */
+ int is_bif_traced;
/* This is a small trampoline function that can be used for lazy code
* loading, global call tracing, and so on. It's only valid when
@@ -43,11 +49,6 @@ typedef struct export
BeamInstr op; /* Union discriminant. */
struct {
- BeamInstr op; /* op_apply_bif */
- BeamInstr func; /* A direct pointer to the BIF */
- } bif;
-
- struct {
BeamInstr op; /* op_i_generic_breakpoint */
BeamInstr address; /* Address of the original function */
} breakpoint;
@@ -85,9 +86,7 @@ typedef struct export
if((EP)->addressv[CX] == (EP)->trampoline.raw) { \
/* The entry currently points at the trampoline, so the
* instructions must be valid. */ \
- ASSERT(((BeamIsOpCode((EP)->trampoline.op, op_apply_bif)) && \
- (EP)->trampoline.bif.func != 0) || \
- ((BeamIsOpCode((EP)->trampoline.op, op_i_generic_breakpoint)) && \
+ ASSERT(((BeamIsOpCode((EP)->trampoline.op, op_i_generic_breakpoint)) && \
(EP)->trampoline.breakpoint.address != 0) || \
((BeamIsOpCode((EP)->trampoline.op, op_trace_jump_W)) && \
(EP)->trampoline.trace.address != 0) || \
@@ -121,10 +120,6 @@ extern erts_mtx_t export_staging_lock;
#include "beam_load.h" /* For em_* extern declarations */
-#define ExportIsBuiltIn(EntryPtr) \
- (((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->trampoline.raw) && \
- (BeamIsOpCode((EntryPtr)->trampoline.op, op_apply_bif)))
-
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE Export*
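The two new Export fields give the BIF call instructions a cheap way to decide between calling the C function directly and making a body call through the export entry when tracing is on (see the call_light_bif comment in the ops.tab hunk further down). A stand-alone model of that decision, with made-up names; the real dispatch lives in the emulator's instruction code, not here:

    #include <stdio.h>

    typedef int (*bif_fun)(int);

    struct export_model {
        void *addressv[1];     /* code address; points at the trampoline when traced */
        int bif_table_index;   /* index into bif_table[], or -1 if not a BIF */
        int is_bif_traced;     /* non-zero if this BIF is call-traced */
        bif_fun bif;           /* direct pointer to the C implementation */
    };

    static int traced_call(struct export_model *ep, int arg)
    {
        /* Stands in for the body call through the export entry, which runs
         * the generic breakpoint code before reaching the BIF itself. */
        printf("trace: call with %d\n", arg);
        return ep->bif(arg);
    }

    /* Roughly the check a call_light_bif-style dispatch has to make. */
    static int call_light_bif(struct export_model *ep, int arg)
    {
        if (ep->is_bif_traced)
            return traced_call(ep, arg);
        return ep->bif(arg);    /* fast path: untraced BIF */
    }

    static int double_it(int x) { return 2 * x; }

    int main(void)
    {
        struct export_model ep = { { 0 }, 7, 0, double_it };
        printf("%d\n", call_light_bif(&ep, 21));  /* untraced */
        ep.is_bif_traced = 1;
        printf("%d\n", call_light_bif(&ep, 21));  /* traced */
        return 0;
    }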
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 40c65461bc..acd42319cb 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -122,6 +122,10 @@ void erts_nif_demonitored(ErtsResource* resource);
extern void erts_add_taint(Eterm mod_atom);
extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(fmtfn_t to, void* to_arg);
+
+/* Loads the specified NIF. The caller must have code write permission. */
+Eterm erts_load_nif(Process *c_p, BeamInstr *I, Eterm filename, Eterm args);
+
void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
extern int erts_nif_get_funcs(struct erl_module_nif*,
@@ -885,6 +889,8 @@ void erts_bif_info_init(void);
/* bif.c */
+void erts_write_bif_wrapper(Export *export, BeamInstr *address);
+
void erts_queue_monitor_message(Process *,
ErtsProcLocks*,
Eterm,
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index c0ca9260a0..a72ed6bce8 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -77,19 +77,32 @@ return
# To ensure that a "move Src x(0)" instruction can be combined with
# the following call instruction, we need to make sure that there is
# no line/1 instruction between the move and the call.
-#
-# A tail-recursive call to an external function (BIF or non-BIF) will
-# never be saved on the stack, so there is no reason to keep the line
-# instruction.
+
+move S X0=x==0 | line Loc | call Ar Func => \
+ line Loc | move S X0 | call Ar Func
move S X0=x==0 | line Loc | call_ext Ar Func => \
line Loc | move S X0 | call_ext Ar Func
+
+#
+# A tail call will not refer to the current function on error unless it's a
+# BIF, so we can omit the line instruction for non-BIFs.
+#
+
+move S X0=x==0 | line Loc | call_ext_last Ar Func=u$is_bif D => \
+ line Loc | move S X0 | call_ext_last Ar Func D
+move S X0=x==0 | line Loc | call_ext_only Ar Func=u$is_bif => \
+ line Loc | move S X0 | call_ext_only Ar Func
+
move S X0=x==0 | line Loc | call_ext_last Ar Func D => \
move S X0 | call_ext_last Ar Func D
move S X0=x==0 | line Loc | call_ext_only Ar Func => \
move S X0 | call_ext_only Ar Func
-move S X0=x==0 | line Loc | call Ar Func => \
- line Loc | move S X0 | call Ar Func
+
+move S X0=x==0 | line Loc | call_last Ar Func D => \
+ move S X0 | call_last Ar Func D
+move S X0=x==0 | line Loc | call_only Ar Func => \
+ move S X0 | call_only Ar Func
line Loc | func_info M F A => func_info M F A | line Loc
@@ -787,62 +800,22 @@ allocate_init t t? y
# External function and bif calls.
#################################################################
-#
-# The BIFs erts_internal:check_process_code/1 must be called like a function,
-# to ensure that c_p->i (program counter) is set correctly (an ordinary
-# BIF call doesn't set it).
-#
-
-call_ext u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext Bif
-call_ext_last u==1 Bif=u$bif:erts_internal:check_process_code/1 D => i_call_ext_last Bif D
-call_ext_only u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext_only Bif
-
-#
-# The BIFs erts_internal:garbage_collect/1 must be called like a function,
-# to allow them to invoke the garbage collector. (The stack pointer must
-# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
-#
-call_ext u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext Bif
-call_ext_last u==1 Bif=u$bif:erts_internal:garbage_collect/1 D => i_call_ext_last Bif D
-call_ext_only u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext_only Bif
+# Expands into call_light_bif(_only)/2
+call_light_bif/1
+call_light_bif_only/1
+call_light_bif_last/2
#
-# put/2 and erase/1 must be able to do garbage collection, so we must call
-# them like functions.
+# The load_nif/2 BIF is an instruction.
#
-call_ext u==2 Bif=u$bif:erlang:put/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erlang:put/2 D => i_call_ext_last Bif D
-call_ext_only u==2 Bif=u$bif:erlang:put/2 => i_call_ext_only Bif
-
-call_ext u==1 Bif=u$bif:erlang:erase/1 => i_call_ext Bif
-call_ext_last u==1 Bif=u$bif:erlang:erase/1 D => i_call_ext_last Bif D
-call_ext_only u==1 Bif=u$bif:erlang:erase/1 => i_call_ext_only Bif
-
-#
-# The process_info/1,2 BIF should be called like a function, to force
-# the emulator to set c_p->current before calling it (a BIF call doesn't
-# set it).
-#
-# In addition, we force the use of a non-tail-recursive call. This will ensure
-# that c_p->cp points into the function making the call.
-#
-
-call_ext u==1 Bif=u$bif:erlang:process_info/1 => i_call_ext Bif
-call_ext_last u==1 Bif=u$bif:erlang:process_info/1 D => i_call_ext Bif | deallocate_return D
-call_ext_only Ar=u==1 Bif=u$bif:erlang:process_info/1 => allocate u Ar | i_call_ext Bif | deallocate_return u
-
-call_ext u==2 Bif=u$bif:erlang:process_info/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erlang:process_info/2 D => i_call_ext Bif | deallocate_return D
-call_ext_only Ar=u==2 Bif=u$bif:erlang:process_info/2 => allocate u Ar | i_call_ext Bif | deallocate_return u
-
-#
-# load_nif/2 also needs to know calling function like process_info
-#
-call_ext u==2 Bif=u$bif:erlang:load_nif/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erlang:load_nif/2 D => i_call_ext Bif | deallocate_return D
-call_ext_only Ar=u==2 Bif=u$bif:erlang:load_nif/2 => allocate u Ar | i_call_ext Bif | deallocate_return u
+call_ext u==2 u$func:erlang:load_nif/2 => i_load_nif
+call_ext_last u==2 u$func:erlang:load_nif/2 D => i_load_nif | deallocate_return D
+call_ext_only u==2 u$func:erlang:load_nif/2 => i_load_nif | return
+%cold
+i_load_nif
+%hot
#
# apply/2 is an instruction, not a BIF.
@@ -861,33 +834,6 @@ call_ext_last u==3 u$bif:erlang:apply/3 D => i_apply_last D
call_ext_only u==3 u$bif:erlang:apply/3 => i_apply_only
#
-# The exit/1 and throw/1 BIFs never execute the instruction following them;
-# thus there is no need to generate any return instruction.
-#
-
-call_ext_last u==1 Bif=u$bif:erlang:exit/1 D => call_bif Bif
-call_ext_last u==1 Bif=u$bif:erlang:throw/1 D => call_bif Bif
-
-call_ext_only u==1 Bif=u$bif:erlang:exit/1 => call_bif Bif
-call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif Bif
-
-#
-# The error/1 and error/2 BIFs never execute the instruction following them;
-# thus there is no need to generate any return instruction.
-# However, they generate stack backtraces, so if the call instruction
-# is call_ext_only/2 instruction, we explicitly do an allocate/2 to store
-# the continuation pointer on the stack.
-#
-
-call_ext_last u==1 Bif=u$bif:erlang:error/1 D => call_bif Bif
-call_ext_last u==2 Bif=u$bif:erlang:error/2 D => call_bif Bif
-
-call_ext_only Ar=u==1 Bif=u$bif:erlang:error/1 => \
- allocate u Ar | call_bif Bif
-call_ext_only Ar=u==2 Bif=u$bif:erlang:error/2 => \
- allocate u Ar | call_bif Bif
-
-#
# The yield/0 BIF is an instruction
#
@@ -999,17 +945,24 @@ call_ext_only u==0 u$func:os:perf_counter/0 => \
i_perf_counter | return
#
-# The general case for BIFs that have no special instructions.
-# A BIF used in the tail must be followed by a return instruction.
+# BIFs like process_info/1,2 require up-to-date information about the current
+# emulator state, which the ordinary call_light_bif instruction doesn't save.
#
-# To make trapping and stack backtraces work correctly, we make sure that
-# the continuation pointer is always stored on the stack.
-call_ext u Bif=u$is_bif => call_bif Bif
+call_ext u Bif=u$is_bif | is_heavy_bif(Bif) => \
+ i_call_ext Bif
+call_ext_last u Bif=u$is_bif D | is_heavy_bif(Bif) => \
+ i_call_ext Bif | deallocate_return D
+call_ext_only Ar=u Bif=u$is_bif | is_heavy_bif(Bif) => \
+ allocate u Ar | i_call_ext Bif | deallocate_return u
-call_ext_last u Bif=u$is_bif D => deallocate D | call_bif_only Bif
+#
+# The general case for BIFs that have no special requirements.
+#
-call_ext_only Ar=u Bif=u$is_bif => call_bif_only Bif
+call_ext u Bif=u$is_bif => call_light_bif Bif
+call_ext_last u Bif=u$is_bif D => call_light_bif_last Bif D
+call_ext_only Ar=u Bif=u$is_bif => call_light_bif_only Bif
#
# Any remaining calls are calls to Erlang functions, not BIFs.
@@ -1034,14 +987,32 @@ i_apply_fun
i_apply_fun_last Q
i_apply_fun_only
+#
+# When a BIF is traced, these instructions make a body call through the export
+# entry instead of calling the BIF directly (setting up a temporary stack frame
+# if needed). We therefore retain the stack frame in call_light_bif_last, and
+# add a deallocate_return after call_light_bif_only to remove the temporary
+# stack frame before returning.
+#
+
+call_light_bif Bif=u$is_bif => \
+ call_light_bif Bif Bif
+
+call_light_bif_last Bif=u$is_bif D => \
+ call_light_bif Bif Bif | deallocate_return D
+
+call_light_bif_only Bif=u$is_bif => \
+ call_light_bif_only Bif Bif | deallocate_return u
+
+call_light_bif b e
+call_light_bif_only b e
+
%cold
-i_hibernate
+i_hibernate
i_perf_counter
-%hot
-call_bif e
-call_bif_only e
+%hot
#
# Calls to non-building and guard BIFs.
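Stepping back to the BIF call transforms above: the loader now picks the call instruction from the BIF's kind, so the heavy BIFs that used to be special-cased one by one go through i_call_ext (which keeps the emulator state up to date) while everything else takes the light path. A stand-alone sketch of that selection; BifKind matches the enum make_tables emits below, the rest of the names are illustrative and the real selection is done by the ops.tab transforms, not by a C function:

    #include <stdio.h>

    typedef enum {
        BIF_KIND_REGULAR,
        BIF_KIND_HEAVY,
        BIF_KIND_GUARD
    } BifKind;

    /* Roughly the question the is_heavy_bif() transform predicate answers. */
    static int is_heavy_bif(BifKind kind)
    {
        return kind == BIF_KIND_HEAVY;
    }

    static const char *pick_call_instruction(BifKind kind)
    {
        return is_heavy_bif(kind) ? "i_call_ext" : "call_light_bif";
    }

    int main(void)
    {
        printf("%s\n", pick_call_instruction(BIF_KIND_HEAVY));   /* e.g. process_info/2 */
        printf("%s\n", pick_call_instruction(BIF_KIND_REGULAR)); /* e.g. lists:reverse/2 */
        return 0;
    }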
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index 742592f88e..477b0f5bb3 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -832,21 +832,27 @@ deep_exception() ->
R1 -> ct:fail({returned,abbr(R1)})
catch error:badarg -> ok
end,
- expect(fun ({trace,S,call,{lists,reverse,[L1,L2]}})
+ expect(fun ({trace,S,call,{lists,reverse,[L1,L2]}}, Traps)
when is_list(L1), is_list(L2), S == Self ->
- next;
+ %% Each trapping call to reverse/2 must have a corresponding
+ %% exception_from
+ {next, Traps + 1};
({trace,S,exception_from,
- {lists,reverse,2},{error,badarg}})
+ {lists,reverse,2},{error,badarg}}, Traps)
+ when S == Self, Traps > 1 ->
+ {next, Traps - 1};
+ ({trace,S,exception_from,
+ {lists,reverse,2},{error,badarg}}, 1)
when S == Self ->
expected;
- ('_') ->
+ ('_', _Traps) ->
{trace,Self,exception_from,
{lists,reverse,2},{error,badarg}};
- (_) ->
+ (_, _Traps) ->
{unexpected,
{trace,Self,exception_from,
{lists,reverse,2},{error,badarg}}}
- end),
+ end, 0),
deep_exception(?LINE, deep_5, [1,2], 7,
[{trace,Self,call,{erlang,error,[undef]}},
{trace,Self,exception_from,{erlang,error,1},
@@ -896,21 +902,27 @@ deep_exception() ->
R2 -> ct:fail({returned,abbr(R2)})
catch error:badarg -> ok
end,
- expect(fun ({trace,S,call,{lists,reverse,[L1,L2]}})
+ expect(fun ({trace,S,call,{lists,reverse,[L1,L2]}}, Traps)
when is_list(L1), is_list(L2), S == Self ->
- next;
+ %% Each trapping call to reverse/2 must have a corresponding
+ %% exception_from
+ {next, Traps + 1};
+ ({trace,S,exception_from,
+ {lists,reverse,2},{error,badarg}}, Traps)
+ when S == Self, Traps > 1 ->
+ {next, Traps - 1};
({trace,S,exception_from,
- {lists,reverse,2},{error,badarg}})
+ {lists,reverse,2},{error,badarg}}, 1)
when S == Self ->
expected;
- ('_') ->
+ ('_', _Traps) ->
{trace,Self,exception_from,
{lists,reverse,2},{error,badarg}};
- (_) ->
+ (_, _Traps) ->
{unexpected,
{trace,Self,exception_from,
{lists,reverse,2},{error,badarg}}}
- end),
+ end, 0),
deep_exception(?LINE, apply, [?MODULE,deep_5,[1,2]], 7,
[{trace,Self,call,{erlang,error,[undef]}},
{trace,Self,exception_from,{erlang,error,1},
@@ -975,21 +987,27 @@ deep_exception() ->
R3 -> ct:fail({returned,abbr(R3)})
catch error:badarg -> ok
end,
- expect(fun ({trace,S,call,{lists,reverse,[L1,L2]}})
+ expect(fun ({trace,S,call,{lists,reverse,[L1,L2]}}, Traps)
when is_list(L1), is_list(L2), S == Self ->
- next;
+ %% Each trapping call to reverse/2 must have a corresponding
+ %% exception_from
+ {next, Traps + 1};
+ ({trace,S,exception_from,
+ {lists,reverse,2},{error,badarg}}, Traps)
+ when S == Self, Traps > 1 ->
+ {next, Traps - 1};
({trace,S,exception_from,
- {lists,reverse,2},{error,badarg}})
+ {lists,reverse,2},{error,badarg}}, 1)
when S == Self ->
expected;
- ('_') ->
+ ('_', _Traps) ->
{trace,Self,exception_from,
{lists,reverse,2},{error,badarg}};
- (_) ->
+ (_, _Traps) ->
{unexpected,
{trace,Self,exception_from,
{lists,reverse,2},{error,badarg}}}
- end),
+ end, 0),
deep_exception(?LINE, apply,
[fun () -> ?MODULE:deep_5(1,2) end, []], 7,
[{trace,Self,call,{erlang,error,[undef]}},
@@ -1249,6 +1267,24 @@ expect(Message) ->
ct:fail(no_trace_message)
end.
+expect(Validator, State0) when is_function(Validator) ->
+ receive
+ M ->
+ case Validator(M, State0) of
+ expected ->
+ ok = io:format("Expected and got ~p", [abbr(M)]);
+ {next, State} ->
+ ok = io:format("Expected and got ~p", [abbr(M)]),
+ expect(Validator, State);
+ {unexpected,Message} ->
+ io:format("Expected ~p; got ~p", [abbr(Message),abbr(M)]),
+ ct:fail({unexpected,abbr([M|flush()])})
+ end
+ after 5000 ->
+            io:format("Expected ~p; got nothing", [abbr(Validator('_', State0))]),
+ ct:fail(no_trace_message)
+ end.
+
trace_info(What, Key) ->
get(tracer) ! {apply,self(),{erlang,trace_info,[What,Key]}},
Res = receive
diff --git a/erts/emulator/test/dirty_bif_SUITE.erl b/erts/emulator/test/dirty_bif_SUITE.erl
index 4f5ad0295a..2ded862b8a 100644
--- a/erts/emulator/test/dirty_bif_SUITE.erl
+++ b/erts/emulator/test/dirty_bif_SUITE.erl
@@ -397,7 +397,9 @@ dirty_process_trace(Config) when is_list(Config) ->
access_dirty_process(
Config,
fun() ->
- erlang:trace_pattern({erts_debug,dirty_io,2},
+ %% BIFs can only be traced when their modules are loaded.
+ code:ensure_loaded(erts_debug),
+ 1 = erlang:trace_pattern({erts_debug,dirty_io,2},
[{'_',[],[{return_trace}]}],
[local,meta]),
ok
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index 21de6b1002..686b431876 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -198,7 +198,8 @@ caller_and_return_to(Config) ->
{trace,Tracee,call,{?MODULE,do_the_put,[test]},{?MODULE,do_put,1}},
{trace,Tracee,call,{erlang,integer_to_list,[1]},{?MODULE,do_the_put,1}},
{trace,Tracee,return_to,{?MODULE,do_the_put,1}},
- {trace,Tracee,call,{erlang,put,[test,"1"]},{?MODULE,do_put,1}},
+ {trace,Tracee,call,{erlang,put,[test,"1"]},{?MODULE,do_the_put,1}},
+ {trace,Tracee,return_to,{?MODULE,do_the_put,1}},
{trace,Tracee,return_to,{?MODULE,do_put,1}},
%% These last trace messages are a bit strange...
diff --git a/erts/emulator/test/trace_call_time_SUITE.erl b/erts/emulator/test/trace_call_time_SUITE.erl
index 26f96a1766..9bab4cbbd8 100644
--- a/erts/emulator/test/trace_call_time_SUITE.erl
+++ b/erts/emulator/test/trace_call_time_SUITE.erl
@@ -254,8 +254,7 @@ combo(Config) when is_list(Config) ->
2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, [], [local]),
2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, true, [call_time]),
2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, MetaMs, [{meta,MetaTracer}]),
- %% not implemented
- %2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, true, [call_count]),
+ 2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, true, [call_count]),
1 = erlang:trace(Self, true, [{tracer,LocalTracer} | Flags]),
%%
@@ -284,9 +283,7 @@ combo(Config) when is_list(Config) ->
{value,{match_spec,[]}} = lists:keysearch(match_spec, 1, TraceInfoBif),
{value,{meta, MetaTracer}} = lists:keysearch(meta, 1, TraceInfoBif),
{value,{meta_match_spec,MetaMs}} = lists:keysearch(meta_match_spec, 1, TraceInfoBif),
- %% not implemented
- {value,{call_count,false}} = lists:keysearch(call_count, 1, TraceInfoBif),
- %{value,{call_count,0}} = lists:keysearch(call_count, 1, TraceInfoBif),
+ {value,{call_count,0}} = lists:keysearch(call_count, 1, TraceInfoBif),
{value,{call_time,[]}} = lists:keysearch(call_time, 1, TraceInfoBif),
%%
@@ -429,6 +426,8 @@ called_function(Config) when is_list(Config) ->
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
dead_tracer(Config) when is_list(Config) ->
+ TracedMFAs = dead_tracer_mfas(),
+
Self = self(),
FirstTracer = tracer(),
StartTracing = fun() -> turn_on_tracing(Self) end,
@@ -443,14 +442,14 @@ dead_tracer(Config) when is_list(Config) ->
erlang:yield(),
%% Collect and check that we only get call_time info for the current process.
- Info1 = collect_all_info(),
+ Info1 = collect_all_info(TracedMFAs),
[] = other_than_self(Info1),
io:format("~p\n", [Info1]),
%% Note that we have not turned off tracing for the current process,
%% but that the tracer has terminated. No more call_time information should be recorded.
[1,2,3] = seq(1, 3, fun(I) -> I + 1 end),
- [] = collect_all_info(),
+ [] = collect_all_info(TracedMFAs),
%% When we start a second tracer process, that tracer process must
%% not inherit the tracing flags and the dead tracer (even though
@@ -459,7 +458,7 @@ dead_tracer(Config) when is_list(Config) ->
tell_tracer(SecondTracer, StartTracing),
Seq20 = lists:seq(1, 20),
Seq20 = seq(1, 20, fun(I) -> I + 1 end),
- Info2 = collect_all_info(),
+ Info2 = collect_all_info(TracedMFAs),
io:format("~p\n", [Info2]),
[] = other_than_self(Info2),
SecondTracer ! quit,
@@ -495,9 +494,21 @@ turn_on_tracing(Pid) ->
_ = now(),
ok.
-collect_all_info() ->
- collect_all_info([{?MODULE,F,A} || {F,A} <- module_info(functions)] ++
- erlang:system_info(snifs)).
+%% We want to trace functions local to this module as well as all BIFs, and for
+%% the latter we need to ensure that their modules are loaded.
+dead_tracer_mfas() ->
+ Modules = [M || {M,_F,_A} <- erlang:system_info(snifs)],
+ Whitelist0 = gb_sets:from_list(Modules),
+ Whitelist = case code:ensure_modules_loaded(Modules) of
+ {error, Reasons} ->
+ Blacklist = gb_sets:from_list([M || {M, _} <- Reasons]),
+ gb_sets:subtract(Whitelist0, Blacklist);
+ ok ->
+ Whitelist0
+ end,
+ EligibleSNIFs = [MFA || {M,_F,_A}=MFA <- erlang:system_info(snifs),
+ gb_sets:is_element(M, Whitelist)],
+ [{?MODULE,F,A} || {F,A} <- module_info(functions)] ++ EligibleSNIFs.
collect_all_info([MFA|T]) ->
CallTime = erlang:trace_info(MFA, call_time),
@@ -567,21 +578,29 @@ seq_r(Start, Stop, Succ, R) ->
seq_r(Succ(Start), Stop, Succ, [Start | R]).
% Check call time tracing data and print mismatches
-check_trace_info(Mfa, [{Pid, C,_,_}] = Expect, Time) ->
- case erlang:trace_info(Mfa, call_time) of
- % Time tests are somewhat problematic. We want to know if Time (EXPECTED_TIME) and S*1000000 + Us (ACTUAL_TIME)
- % is the same.
- % If the ratio EXPECTED_TIME/ACTUAL_TIME is ~ 1 or if EXPECTED_TIME - ACTUAL_TIME is near zero, the test is ok.
- {call_time,[{Pid,C,S,Us}]} when S >= 0, Us >= 0, abs(1 - Time/(S*1000000 + Us)) < ?R_ERROR; abs(Time - S*1000000 - Us) < ?US_ERROR ->
+check_trace_info(Mfa, [{Pid, ExpectedC,_,_}] = Expect, Time) ->
+ {call_time,[{Pid,C,S,Us}]} = erlang:trace_info(Mfa, call_time),
+ {Mod, Name, Arity} = Mfa,
+ IsBuiltin = erlang:is_builtin(Mod, Name, Arity),
+ if
+ %% Call count on BIFs may exceed number of calls as they often trap to
+ %% themselves.
+ IsBuiltin, C >= ExpectedC, S >= 0, Us >= 0,
+ abs(1 - Time/(S*1000000 + Us)) < ?R_ERROR;
+ abs(Time - S*1000000 - Us) < ?US_ERROR ->
ok;
- {call_time,[{Pid,C,S,Us}]} ->
+ not IsBuiltin, C =:= ExpectedC, S >= 0, Us >= 0,
+ abs(1 - Time/(S*1000000 + Us)) < ?R_ERROR;
+ abs(Time - S*1000000 - Us) < ?US_ERROR ->
+ ok;
+ true ->
Sum = S*1000000 + Us,
- io:format("Expected ~p -> {call_time, ~p (Time ~p us)}~n - got ~w s. ~w us. = ~w us. - ~w -> delta ~w (ratio ~.2f, should be 1.0)~n",
- [Mfa, Expect, Time, S, Us, Sum, Time, Sum - Time, Time/Sum]),
- time_error;
- Other ->
- io:format("Expected ~p -> {call_time, ~p (Time ~p us)}~n - got ~p~n", [ Mfa, Expect, Time, Other]),
- time_count_error
+ io:format("Expected ~p -> {call_time, ~p (Time ~p us)}~n - got ~w "
+ "s. ~w us. = ~w us. - ~w -> delta ~w (ratio ~.2f, "
+ "should be 1.0)~n",
+ [Mfa, Expect, Time,
+ S, Us, Sum, Time, Sum - Time, Time/Sum]),
+ time_error
end;
check_trace_info(Mfa, Expect, _) ->
case erlang:trace_info(Mfa, call_time) of
@@ -670,9 +689,12 @@ loop() ->
quit ->
ok;
{Pid, execute, Fun } when is_function(Fun) ->
+ %% Make sure we always run with the same amount of reductions.
+ erlang:yield(),
Pid ! {self(), answer, erlang:apply(Fun, [])},
loop();
{Pid, execute, {M, F, A}} ->
+ erlang:yield(),
Pid ! {self(), answer, erlang:apply(M, F, A)},
loop()
end.
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index deee5c2344..ef0df28fc8 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -35,7 +35,6 @@ use File::Basename;
# Output:
# <-src>/erl_am.c
# <-src>/erl_bif_table.c
-# <-src>/erl_bif_wrap.c
# <-src>/erl_dirty_bif_wrap.c
# <-src>/erl_guard_bifs.c
# <-src>/hipe_nbif_impl.c
@@ -92,7 +91,7 @@ while (<>) {
my($type, @args) = split;
if ($type eq 'atom') {
save_atoms(@args);
- } elsif ($type eq 'bif' or $type eq 'ubif' or $type eq 'gcbif') {
+ } elsif ($type eq 'bif' or $type eq 'ubif' or $type eq 'hbif') {
if (@args > 2) {
error("$type only allows two arguments");
}
@@ -124,14 +123,22 @@ while (<>) {
error("invalid sched_type: $sched_type");
}
- my $wrapper;
- if ($type eq 'bif') {
- $wrapper = "wrap_$alias";
- } else {
- $wrapper = $alias;
- }
+ my $kind;
+ if ($type eq 'bif') {
+ $kind = 'BIF_KIND_REGULAR';
+ }
+ elsif ($type eq 'hbif') {
+ $kind = 'BIF_KIND_HEAVY';
+ }
+ elsif ($type eq 'ubif') {
+ $kind = 'BIF_KIND_GUARD';
+ }
+ else {
+ error("invalid bif_type: $type");
+ }
+
push(@bif, ["am_$atom_alias{$mod}","am_$atom_alias{$name}",$arity,
- $alias3,$wrapper,$alias]);
+ $alias3,$alias,$kind]);
push(@bif_info, [$type, $sched_type, $alias3, $alias]);
} elsif ($type eq 'dirty-cpu' or $type eq 'dirty-io'
or $type eq 'dirty-cpu-test' or $type eq 'dirty-io-test') {
@@ -196,7 +203,7 @@ open_file("$include/erl_bif_list.h");
my $i;
for ($i = 0; $i < @bif; $i++) {
# module atom, function atom, arity, C function, table index
- print "BIF_LIST($bif[$i]->[0],$bif[$i]->[1],$bif[$i]->[2],$bif[$i]->[3],$bif[$i]->[5],$i)\n";
+ print "BIF_LIST($bif[$i]->[0],$bif[$i]->[1],$bif[$i]->[2],$bif[$i]->[3],$bif[$i]->[4],$i)\n";
}
#
@@ -208,15 +215,24 @@ my $bif_size = @bif;
print <<EOF;
#ifndef __ERL_BIF_TABLE_H__
#define __ERL_BIF_TABLE_H__
+
+#include "sys.h"
+
typedef void *BifFunction;
+typedef enum {
+ BIF_KIND_REGULAR,
+ BIF_KIND_HEAVY,
+ BIF_KIND_GUARD
+} BifKind;
+
typedef struct bif_entry {
Eterm module;
Eterm name;
int arity;
BifFunction f;
- BifFunction traced;
BifFunction impl;
+ BifKind kind;
} BifEntry;
typedef struct erts_gc_bif {
@@ -232,7 +248,6 @@ typedef struct erts_u_bif {
extern BifEntry bif_table[];
extern Export* bif_export[];
-extern const ErtsGcBif erts_gc_bifs[];
extern const ErtsUBif erts_u_bifs[];
#define BIF_SIZE $bif_size
@@ -250,7 +265,6 @@ for ($i = 0; $i < @bif; $i++) {
my $args = join(', ', 'Process*', 'Eterm*', 'UWord*');
my $name = $bif_info[$i]->[3];
print "Eterm $name($args);\n";
- print "Eterm wrap_$name($args);\n";
print "Eterm erts_gc_$name(Process* p, Eterm* reg, Uint live);\n"
if $bif_info[$i]->[0] eq 'gcbif';
print "Eterm $bif_info[$i]->[2]($args);\n"
@@ -283,25 +297,6 @@ for ($i = 0; $i < @bif; $i++) {
print "};\n\n";
#
-# Generate the bif wrappers file.
-#
-
-open_file("$src/erl_bif_wrap.c");
-my $i;
-includes("export.h", "sys.h", "erl_vm.h", "global.h", "erl_process.h", "bif.h",
- "erl_bif_table.h", "erl_atom_table.h");
-for ($i = 0; $i < @bif; $i++) {
- next if $bif[$i]->[3] eq $bif[$i]->[4]; # Skip unwrapped bifs
- my $arity = $bif[$i]->[2];
- my $func = $bif_info[$i]->[3];
- print "Eterm\n";
- print "wrap_$func(Process* p, Eterm* args, UWord* I)\n";
- print "{\n";
- print " return erts_bif_trace($i, p, args, I);\n";
- print "}\n\n";
-}
-
-#
# Generate erl_gc_bifs.c.
#
@@ -309,18 +304,11 @@ open_file("$src/erl_guard_bifs.c");
my $i;
includes("export.h", "sys.h", "erl_vm.h", "global.h", "erl_process.h", "bif.h",
"erl_bif_table.h");
-print "const ErtsGcBif erts_gc_bifs[] = {\n";
-for ($i = 0; $i < @bif; $i++) {
- next unless $bif_info[$i]->[0] eq 'gcbif';
- print " {$bif[$i]->[3], erts_gc_$bif[$i]->[3], BIF_$bif[$i]->[5]},\n";
-}
-print " {NULL, NULL, -1}\n";
-print "};\n";
print "const ErtsUBif erts_u_bifs[] = {\n";
for ($i = 0; $i < @bif; $i++) {
next unless $bif_info[$i]->[0] eq 'ubif';
- print " {$bif[$i]->[3], BIF_$bif[$i]->[5]},\n";
+ print " {$bif[$i]->[3], BIF_$bif[$i]->[4]},\n";
}
print " {NULL, -1}\n";
print "};\n";
@@ -368,7 +356,7 @@ EOF
my $i;
for ($i = 0; $i < @bif; $i++) {
print <<EOF;
-Eterm nbif_impl_$bif[$i]->[5](Process *c_p, Eterm *regs);
+Eterm nbif_impl_$bif[$i]->[4](Process *c_p, Eterm *regs);
EOF
}
@@ -388,9 +376,9 @@ EOF
for ($i = 0; $i < @bif; $i++) {
print <<EOF;
-Eterm nbif_impl_$bif[$i]->[5](Process *c_p, Eterm *regs)
+Eterm nbif_impl_$bif[$i]->[4](Process *c_p, Eterm *regs)
{
- return $bif[$i]->[3](c_p, regs, (UWord *) bif_export\[BIF_$bif[$i]->[5]\]);
+ return $bif[$i]->[3](c_p, regs, (UWord *) bif_export\[BIF_$bif[$i]->[4]\]);
}
EOF
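The BifEntry rows that make_tables now generates carry a kind instead of the old traced wrapper pointer. A self-contained sketch of the resulting table shape, reusing the struct and enum from the heredoc above with stand-in types; the two rows are made-up examples, not generated output:

    #include <stdio.h>

    typedef unsigned long Eterm;      /* stand-in for the real Eterm type */
    typedef void *BifFunction;

    typedef enum {
        BIF_KIND_REGULAR,
        BIF_KIND_HEAVY,
        BIF_KIND_GUARD
    } BifKind;

    /* Same layout as the struct printed above: the 'traced' wrapper pointer
     * is gone and a kind field takes its place. */
    typedef struct bif_entry {
        Eterm module;
        Eterm name;
        int arity;
        BifFunction f;
        BifFunction impl;
        BifKind kind;
    } BifEntry;

    static int hd_1_impl, process_info_2_impl;  /* stand-ins for the C functions */

    /* Hypothetical rows; the real values come from bif.tab via make_tables. */
    static BifEntry example_table[] = {
        /* module,           name,                  arity, f, impl, kind */
        { 1 /* am_erlang */, 2 /* am_hd */,             1,
          &hd_1_impl, &hd_1_impl, BIF_KIND_GUARD },
        { 1 /* am_erlang */, 3 /* am_process_info */,   2,
          &process_info_2_impl, &process_info_2_impl, BIF_KIND_HEAVY },
    };

    int main(void)
    {
        printf("%d entries\n",
               (int)(sizeof(example_table) / sizeof(example_table[0])));
        return 0;
    }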