Diffstat (limited to 'asmrun')
-rw-r--r--  asmrun/Makefile           3
-rw-r--r--  asmrun/amd64.S           44
-rw-r--r--  asmrun/backtrace.c      175
-rw-r--r--  asmrun/signals_asm.c      2
-rw-r--r--  asmrun/signals_osdep.h    2
5 files changed, 164 insertions(+), 62 deletions(-)
diff --git a/asmrun/Makefile b/asmrun/Makefile
index 89adaa2e0c..5ebf7aadbd 100644
--- a/asmrun/Makefile
+++ b/asmrun/Makefile
@@ -81,6 +81,9 @@ install-prof:
cp libasmrunp.a $(LIBDIR)/libasmrunp.a
cd $(LIBDIR); $(RANLIB) libasmrunp.a
+power-bsd_elf.S: power-elf.S
+ cp power-elf.S power-bsd_elf.S
+
power.o: power-$(SYSTEM).o
cp power-$(SYSTEM).o power.o
diff --git a/asmrun/amd64.S b/asmrun/amd64.S
index 4a5218f269..aed5a964fb 100644
--- a/asmrun/amd64.S
+++ b/asmrun/amd64.S
@@ -74,7 +74,7 @@
#endif
#ifdef WITH_FRAME_POINTERS
-
+
#define ENTER_FUNCTION \
pushq %rbp; CFI_ADJUST(8); \
movq %rsp, %rbp
@@ -89,7 +89,7 @@
addq $8, %rsp; CFI_ADJUST (-8);
#endif
-
+
#if defined(__PIC__) && !defined(SYS_mingw64)
/* Position-independent operations on global variables. */
@@ -135,7 +135,7 @@
/* Load address of global [label] in register [dst]. */
#define LEA_VAR(label,dst) \
- movq GREL(label)(%rip), dst
+ movq GREL(label)(%rip), dst
#else
@@ -166,7 +166,7 @@
STORE_VAR(%rax,caml_bottom_of_stack)
#define LEA_VAR(label,dst) \
- leaq G(label)(%rip), dst
+ leaq G(label)(%rip), dst
#endif
/* Save and restore all callee-save registers on stack.
@@ -273,9 +273,9 @@ LBL(caml_call_gc):
#endif
/* Build array of registers, save it into caml_gc_regs */
#ifdef WITH_FRAME_POINTERS
- ENTER_FUNCTION ;
+ ENTER_FUNCTION ;
#else
- pushq %rbp; CFI_ADJUST(8);
+ pushq %rbp; CFI_ADJUST(8);
#endif
pushq %r11; CFI_ADJUST (8);
pushq %r10; CFI_ADJUST (8);
@@ -349,9 +349,9 @@ LBL(caml_call_gc):
popq %r10; CFI_ADJUST(-8)
popq %r11; CFI_ADJUST(-8)
#ifdef WITH_FRAME_POINTERS
- LEAVE_FUNCTION
+ LEAVE_FUNCTION
#else
- popq %rbp; CFI_ADJUST(-8);
+ popq %rbp; CFI_ADJUST(-8);
#endif
/* Return to caller */
ret
@@ -366,11 +366,11 @@ LBL(caml_alloc1):
ret
LBL(100):
RECORD_STACK_FRAME(0)
- ENTER_FUNCTION
+ ENTER_FUNCTION
/* subq $8, %rsp; CFI_ADJUST (8); */
call LBL(caml_call_gc)
/* addq $8, %rsp; CFI_ADJUST (-8); */
- LEAVE_FUNCTION
+ LEAVE_FUNCTION
jmp LBL(caml_alloc1)
CFI_ENDPROC
@@ -383,11 +383,11 @@ LBL(caml_alloc2):
ret
LBL(101):
RECORD_STACK_FRAME(0)
- ENTER_FUNCTION
+ ENTER_FUNCTION
/* subq $8, %rsp; CFI_ADJUST (8); */
call LBL(caml_call_gc)
/* addq $8, %rsp; CFI_ADJUST (-8); */
- LEAVE_FUNCTION
+ LEAVE_FUNCTION
jmp LBL(caml_alloc2)
CFI_ENDPROC
@@ -400,11 +400,11 @@ LBL(caml_alloc3):
ret
LBL(102):
RECORD_STACK_FRAME(0)
- ENTER_FUNCTION
+ ENTER_FUNCTION
/* subq $8, %rsp; CFI_ADJUST (8) */
call LBL(caml_call_gc)
/* addq $8, %rsp; CFI_ADJUST (-8) */
- LEAVE_FUNCTION
+ LEAVE_FUNCTION
jmp LBL(caml_alloc3)
CFI_ENDPROC
@@ -420,12 +420,12 @@ LBL(caml_allocN):
LBL(103):
RECORD_STACK_FRAME(8)
#ifdef WITH_FRAME_POINTERS
- /* Do we need 16-byte alignment here ? */
- ENTER_FUNCTION
+ /* Do we need 16-byte alignment here ? */
+ ENTER_FUNCTION
#endif
call LBL(caml_call_gc)
#ifdef WITH_FRAME_POINTERS
- LEAVE_FUNCTION
+ LEAVE_FUNCTION
#endif
popq %rax; CFI_ADJUST(-8) /* recover desired size */
jmp LBL(caml_allocN)
@@ -535,8 +535,8 @@ LBL(110):
movq %rax, %r12 /* Save exception bucket */
movq %rax, C_ARG_1 /* arg 1: exception bucket */
#ifdef WITH_FRAME_POINTERS
- ENTER_FUNCTION
- movq 8(%rsp), C_ARG_2 /* arg 2: pc of raise */
+ ENTER_FUNCTION
+ movq 8(%rsp), C_ARG_2 /* arg 2: pc of raise */
leaq 16(%rsp), C_ARG_3 /* arg 3: sp at raise */
#else
popq C_ARG_2 /* arg 2: pc of raise */
@@ -544,7 +544,7 @@ LBL(110):
#endif
movq %r14, C_ARG_4 /* arg 4: sp of handler */
/* PR#5700: thanks to popq above, stack is now 16-aligned */
- /* Thanks to ENTER_FUNCTION, stack is now 16-aligned */
+ /* Thanks to ENTER_FUNCTION, stack is now 16-aligned */
PREPARE_FOR_C_CALL /* no need to cleanup after */
call GCALL(caml_stash_backtrace)
movq %r12, %rax /* Recover exception bucket */
@@ -566,7 +566,7 @@ CFI_STARTPROC
ret
LBL(111):
#ifdef WITH_FRAME_POINTERS
- ENTER_FUNCTION ;
+ ENTER_FUNCTION ;
#endif
movq C_ARG_1, %r12 /* Save exception bucket */
/* arg 1: exception bucket */
@@ -587,7 +587,7 @@ CFI_ENDPROC
/* Raise a Stack_overflow exception on return from segv_handler()
(in asmrun/signals_asm.c). On entry, the stack is full, so we
- cannot record a backtrace.
+ cannot record a backtrace.
No CFI information here since this function disrupts the stack
backtrace anyway. */
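
The GC entry points above wrap their work in ENTER_FUNCTION / LEAVE_FUNCTION when WITH_FRAME_POINTERS is set, keeping the conventional "pushq %rbp; movq %rsp, %rbp" prologue so that the saved %rbp values form a linked list of frames that external tools can walk. OCaml's own backtraces use frame descriptors (see backtrace.c below), not this chain; the following is only a minimal, hypothetical C walker illustrating what keeping %rbp buys, assuming frame pointers are enabled and relying on the GCC/Clang builtin __builtin_frame_address:

    #include <stdio.h>

    /* With frame pointers, each x86-64 frame starts with the caller's
       saved %rbp, followed by the return address pushed by `call`. */
    struct frame {
      struct frame *next;   /* saved %rbp, i.e. the caller's frame */
      void *retaddr;        /* return address into the caller */
    };

    static void walk_frame_pointers(struct frame *fp, int max_depth)
    {
      int i;
      /* Stop on a null link or after max_depth frames, since the
         outermost frame's saved %rbp is not guaranteed to be NULL. */
      for (i = 0; fp != NULL && i < max_depth; i++, fp = fp->next)
        printf("#%d  return address %p\n", i, fp->retaddr);
    }

    int main(void)
    {
      walk_frame_pointers((struct frame *) __builtin_frame_address(0), 16);
      return 0;
    }
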
diff --git a/asmrun/backtrace.c b/asmrun/backtrace.c
index 3ca182413f..3854967cf4 100644
--- a/asmrun/backtrace.c
+++ b/asmrun/backtrace.c
@@ -55,56 +55,75 @@ CAMLprim value caml_backtrace_status(value vunit)
return Val_bool(caml_backtrace_active);
}
-/* Store the return addresses contained in the given stack fragment
- into the backtrace array */
+/* returns the next frame descriptor (or NULL if none is available),
+ and updates *pc and *sp to point to the following one. */
-void caml_stash_backtrace(value exn, uintnat pc, char * sp, char * trapsp)
+frame_descr * caml_next_frame_descriptor(uintnat * pc, char ** sp)
{
frame_descr * d;
uintnat h;
- if (exn != caml_backtrace_last_exn) {
- caml_backtrace_pos = 0;
- caml_backtrace_last_exn = exn;
- }
- if (caml_backtrace_buffer == NULL) {
- caml_backtrace_buffer = malloc(BACKTRACE_BUFFER_SIZE * sizeof(code_t));
- if (caml_backtrace_buffer == NULL) return;
- }
if (caml_frame_descriptors == NULL) caml_init_frame_descriptors();
while (1) {
- /* Find the descriptor corresponding to the return address */
- h = Hash_retaddr(pc);
- while(1) {
+ h = Hash_retaddr(*pc);
+ while (1) {
d = caml_frame_descriptors[h];
- if (d == 0) return; /* can happen if some code not compiled with -g */
- if (d->retaddr == pc) break;
+ if (d == 0) return NULL; /* can happen if some code compiled without -g */
+ if (d->retaddr == *pc) break;
h = (h+1) & caml_frame_descriptors_mask;
}
/* Skip to next frame */
if (d->frame_size != 0xFFFF) {
- /* Regular frame, store its descriptor in the backtrace buffer */
- if (caml_backtrace_pos >= BACKTRACE_BUFFER_SIZE) return;
- caml_backtrace_buffer[caml_backtrace_pos++] = (code_t) d;
+ /* Regular frame, update sp/pc and return the frame descriptor */
#ifndef Stack_grows_upwards
- sp += (d->frame_size & 0xFFFC);
+ *sp += (d->frame_size & 0xFFFC);
#else
- sp -= (d->frame_size & 0xFFFC);
+ *sp -= (d->frame_size & 0xFFFC);
#endif
- pc = Saved_return_address(sp);
+ *pc = Saved_return_address(*sp);
#ifdef Mask_already_scanned
- pc = Mask_already_scanned(pc);
+ *pc = Mask_already_scanned(*pc);
#endif
+ return d;
} else {
/* Special frame marking the top of a stack chunk for an ML callback.
Skip C portion of stack and continue with next ML stack chunk. */
- struct caml_context * next_context = Callback_link(sp);
- sp = next_context->bottom_of_stack;
- pc = next_context->last_retaddr;
+ struct caml_context * next_context = Callback_link(*sp);
+ *sp = next_context->bottom_of_stack;
+ *pc = next_context->last_retaddr;
/* A null sp means no more ML stack chunks; stop here. */
- if (sp == NULL) return;
+ if (*sp == NULL) return NULL;
}
+ }
+}
+
+/* Stores the return addresses contained in the given stack fragment
+   into the backtrace array; this version is performance-sensitive as
+ it is called at each [raise] in a program compiled with [-g], so we
+ preserved the global, statically bounded buffer of the old
+ implementation -- before the more flexible
+ [caml_get_current_callstack] was implemented. */
+
+void caml_stash_backtrace(value exn, uintnat pc, char * sp, char * trapsp)
+{
+ if (exn != caml_backtrace_last_exn) {
+ caml_backtrace_pos = 0;
+ caml_backtrace_last_exn = exn;
+ }
+ if (caml_backtrace_buffer == NULL) {
+ caml_backtrace_buffer = malloc(BACKTRACE_BUFFER_SIZE * sizeof(code_t));
+ if (caml_backtrace_buffer == NULL) return;
+ }
+
+ /* iterate on each frame */
+ while (1) {
+ frame_descr * descr = caml_next_frame_descriptor(&pc, &sp);
+ if (descr == NULL) return;
+ /* store its descriptor in the backtrace buffer */
+ if (caml_backtrace_pos >= BACKTRACE_BUFFER_SIZE) return;
+ caml_backtrace_buffer[caml_backtrace_pos++] = (code_t) descr;
+
/* Stop when we reach the current exception handler */
#ifndef Stack_grows_upwards
if (sp > trapsp) return;
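
The refactoring above turns the stack walk into a reusable iterator: caml_next_frame_descriptor returns one frame descriptor per call and advances *pc / *sp to the caller's frame, returning NULL when no descriptor is found or when the chain of ML stack chunks ends. A minimal sketch of driving it directly, as it might appear further down in this same file (the helper name is hypothetical, and the includes mirror what backtrace.c already depends on):

    #include "mlvalues.h"   /* uintnat, intnat (assumed include) */
    #include "stack.h"      /* frame_descr, caml_last_return_address, ... */

    /* Hypothetical helper: count the OCaml frames reachable from the
       point where execution last left OCaml code.  Only meaningful when
       caml_last_return_address / caml_bottom_of_stack are valid, i.e.
       when C was entered from OCaml. */
    static intnat caml_current_stack_depth(void)
    {
      uintnat pc = caml_last_return_address;
      char * sp = caml_bottom_of_stack;
      char * limitsp = caml_top_of_stack;
      intnat depth = 0;

      while (caml_next_frame_descriptor(&pc, &sp) != NULL) {
        depth++;
    #ifndef Stack_grows_upwards
        if (sp > limitsp) break;   /* walked past the topmost ML chunk */
    #else
        if (sp < limitsp) break;
    #endif
      }
      return depth;
    }
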
@@ -114,6 +133,67 @@ void caml_stash_backtrace(value exn, uintnat pc, char * sp, char * trapsp)
}
}
+/* Stores up to [max_frames_value] frames of the current call stack to
+   return to the user. This is not used in an exception-raising
+   context, but only when the user requests to save the trace
+   (hopefully less often). Instead of using a bounded buffer as
+   [caml_stash_backtrace] does, we first traverse the stack to compute
+   the right size, then allocate space for the trace. */
+
+CAMLprim value caml_get_current_callstack(value max_frames_value) {
+ CAMLparam1(max_frames_value);
+ CAMLlocal1(trace);
+
+  /* We use `intnat` here because, were it only `int`, passing `max_int`
+     from the OCaml side would overflow on 64-bit machines. */
+ intnat max_frames = Long_val(max_frames_value);
+ intnat trace_size;
+
+ /* first compute the size of the trace */
+ {
+ uintnat pc = caml_last_return_address;
+ /* note that [caml_bottom_of_stack] always points to the most recent
+ * frame, independently of the [Stack_grows_upwards] setting */
+ char * sp = caml_bottom_of_stack;
+ char * limitsp = caml_top_of_stack;
+
+ trace_size = 0;
+ while (1) {
+ frame_descr * descr = caml_next_frame_descriptor(&pc, &sp);
+ if (descr == NULL) break;
+ if (trace_size >= max_frames) break;
+ ++trace_size;
+
+#ifndef Stack_grows_upwards
+ if (sp > limitsp) break;
+#else
+ if (sp < limitsp) break;
+#endif
+ }
+ }
+
+ trace = caml_alloc((mlsize_t) trace_size, Abstract_tag);
+
+ /* then collect the trace */
+ {
+ uintnat pc = caml_last_return_address;
+ char * sp = caml_bottom_of_stack;
+ intnat trace_pos;
+
+ for (trace_pos = 0; trace_pos < trace_size; trace_pos++) {
+ frame_descr * descr = caml_next_frame_descriptor(&pc, &sp);
+ Assert(descr != NULL);
+ /* The assignment below is safe without [caml_initialize], even
+ if the trace is large and allocated on the old heap, because
+ we assign values that are outside the OCaml heap. */
+ Assert(!(Is_block((value) descr) && Is_in_heap((value) descr)));
+ Field(trace, trace_pos) = (value) descr;
+ }
+ }
+
+ CAMLreturn(trace);
+}
+
/* Extract location information for the given frame descriptor */
struct loc_info {
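
Since caml_get_current_callstack is an OCaml primitive, its argument arrives as a tagged OCaml integer and its result is a fresh OCaml block. A small, hypothetical C-side wrapper showing the calling convention (Val_long encodes the frame bound; the caller should protect the result, e.g. with CAMLlocal, before any further allocation):

    #include "mlvalues.h"   /* value, intnat, Val_long */

    extern value caml_get_current_callstack(value max_frames_value);

    /* Hypothetical helper: fetch at most [n] frames of the current OCaml
       call stack as an array of frame descriptors (Abstract_tag block). */
    static value current_callstack_from_c(intnat n)
    {
      return caml_get_current_callstack(Val_long(n));
    }
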
@@ -163,22 +243,41 @@ static void extract_location_info(frame_descr * d,
li->loc_endchr = ((info2 & 0xF) << 6) | (info1 >> 26);
}
+/* Print location information -- same behavior as in Printexc.
+
+   Note that the test for compiler-inserted raises is slightly redundant:
+     (!li->loc_valid && li->loc_is_raise)
+   extract_location_info above guarantees that when li->loc_valid is 0,
+   li->loc_is_raise is always 1, so the second half of the test is
+   useless. We keep it so that the code stays identical to the byterun/
+   implementation. */
+
static void print_location(struct loc_info * li, int index)
{
char * info;
/* Ignore compiler-inserted raise */
- if (!li->loc_valid) return;
-
- if (index == 0)
- info = "Raised at";
- else if (li->loc_is_raise)
- info = "Re-raised at";
- else
- info = "Called from";
- fprintf (stderr, "%s file \"%s\", line %d, characters %d-%d\n",
- info, li->loc_filename, li->loc_lnum,
- li->loc_startchr, li->loc_endchr);
+ if (!li->loc_valid && li->loc_is_raise) return;
+
+ if (li->loc_is_raise) {
+ /* Initial raise if index == 0, re-raise otherwise */
+ if (index == 0)
+ info = "Raised at";
+ else
+ info = "Re-raised at";
+ } else {
+ if (index == 0)
+ info = "Raised by primitive operation at";
+ else
+ info = "Called from";
+ }
+ if (! li->loc_valid) {
+ fprintf(stderr, "%s unknown location\n", info);
+ } else {
+ fprintf (stderr, "%s file \"%s\", line %d, characters %d-%d\n",
+ info, li->loc_filename, li->loc_lnum,
+ li->loc_startchr, li->loc_endchr);
+ }
}
/* Print a backtrace */
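
With these changes, print_location distinguishes the initial raise, re-raises, ordinary calls, and raises from primitive operations, and (for parity with the byterun/ version) falls back to printing "unknown location" when a frame carries no debugging information. For a hypothetical program, the resulting stderr lines would look like:

    Raised at file "example.ml", line 12, characters 4-23
    Called from file "example.ml", line 30, characters 2-14
    Re-raised at file "main.ml", line 5, characters 10-27
    Called from file "main.ml", line 17, characters 0-17
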
diff --git a/asmrun/signals_asm.c b/asmrun/signals_asm.c
index 9bc86cfdc5..4f62bd38a9 100644
--- a/asmrun/signals_asm.c
+++ b/asmrun/signals_asm.c
@@ -215,7 +215,7 @@ DECLARE_SIGNAL_HANDLER(segv_handler)
) {
#ifdef RETURN_AFTER_STACK_OVERFLOW
/* Tweak the PC part of the context so that on return from this
- handler, we jump to the asm function [caml_stack_overflow]
+ handler, we jump to the asm function [caml_stack_overflow]
(from $ARCH.S). */
#ifdef CONTEXT_PC
CONTEXT_PC = (context_reg) &caml_stack_overflow;
diff --git a/asmrun/signals_osdep.h b/asmrun/signals_osdep.h
index 5e07b2c2df..ff1984754a 100644
--- a/asmrun/signals_osdep.h
+++ b/asmrun/signals_osdep.h
@@ -237,7 +237,7 @@
/****************** PowerPC, BSD */
-#elif defined(TARGET_power) && defined(SYS_bsd)
+#elif defined(TARGET_power) && (defined(SYS_bsd) || defined(SYS_bsd_elf))
#define DECLARE_SIGNAL_HANDLER(name) \
static void name(int sig, int code, struct sigcontext * context)