Diffstat (limited to 'libgcc/config/rs6000/aix-unwind.h')
 libgcc/config/rs6000/aix-unwind.h | 206 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 202 insertions(+), 4 deletions(-)
diff --git a/libgcc/config/rs6000/aix-unwind.h b/libgcc/config/rs6000/aix-unwind.h
index 9e126595edb..90431a4e3d7 100644
--- a/libgcc/config/rs6000/aix-unwind.h
+++ b/libgcc/config/rs6000/aix-unwind.h
@@ -22,14 +22,21 @@
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
+/* Useful register numbers. */
+
+#define LR_REGNO 65
+#define CR2_REGNO 70
+#define XER_REGNO 76
+#define FIRST_ALTIVEC_REGNO 77
+#define VRSAVE_REGNO 109
+#define VSCR_REGNO 110
+
/* If the current unwind info (FS) does not contain explicit info
saving R2, then we have to do a minor amount of code reading to
figure out if it was saved. The big problem here is that the
code that does the save/restore is generated by the linker, so
we have no good way to determine at compile time what to do. */
-#define R_LR 65
-
#ifdef __64BIT__
#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
do { \
@@ -37,7 +44,7 @@
{ \
unsigned int *insn \
= (unsigned int *) \
- _Unwind_GetGR ((CTX), R_LR); \
+ _Unwind_GetGR ((CTX), LR_REGNO); \
if (*insn == 0xE8410028) \
_Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 40); \
} \
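(For reference, the instruction words matched in this hunk and the next are the
r2 reloads that the AIX linker places right after a cross-module call.  The
standalone sketch below is not part of the patch and its function name is
invented; it merely restates the check outside the unwinder.)

#include <stdint.h>

/* Return the stack offset the linker glue reloads r2 (the TOC pointer)
   from, or -1 if the word at RETURN_ADDRESS is not one of the reload
   instructions matched by MD_FROB_UPDATE_CONTEXT.  */
long
toc_restore_offset (const uint32_t *return_address)
{
  switch (*return_address)
    {
    case 0xE8410028:   /* ld r2,40(r1) -- 64-bit glue */
      return 40;
    case 0x80410014:   /* lwz r2,20(r1) -- 32-bit glue */
      return 20;
    default:
      return -1;       /* no TOC reload at the return point */
    }
}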
@@ -49,9 +56,200 @@
{ \
unsigned int *insn \
= (unsigned int *) \
- _Unwind_GetGR ((CTX), R_LR); \
+ _Unwind_GetGR ((CTX), LR_REGNO); \
if (*insn == 0x80410014) \
_Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 20); \
} \
} while (0)
#endif
+
+/* Now on to MD_FALLBACK_FRAME_STATE_FOR.
+   32-bit AIX 5.2 and 5.3 only at this stage.  */
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <signal.h>
+#include <sys/machine.h>
+
+#ifdef __64BIT__
+
+/* The 64-bit fallback is not implemented yet, so MD_FALLBACK_FRAME_STATE_FOR
+   is not defined.  Arrange just for the code below to compile.  */
+typedef struct __context64 mstate_t;
+
+#else
+
+typedef struct mstsave mstate_t;
+
+#define MD_FALLBACK_FRAME_STATE_FOR ppc_aix_fallback_frame_state
+
+#endif
+
+/* When compiling on AIX < 5.3, the VMX-related data structures are not
+   defined, so we take measures to obtain proper runtime behavior in case the
+   compiled code happens to run on a later version with VMX enabled.  */
+
+#ifndef MSR_VMX
+#define MSR_VMX 0x2000000
+#endif
+
+typedef unsigned int uint;
+typedef struct { uint v[4]; } vreg_t;
+typedef struct {
+ vreg_t regs[32];
+ uint pad1 [3];
+ uint vscr;
+ uint vrsave;
+ uint pad2 [3];
+} vstate_t;
+
+#define EXT_CONTEXT_MARK 0x45435458
+#define EXT_CONTEXT_SIZE 4096
+#define BUMPER_SIZE (EXT_CONTEXT_SIZE - sizeof(vstate_t) - (5 * sizeof(int)))
+
+typedef struct {
+ uint pad1 [4];
+ vstate_t vstate;
+ char bumper [BUMPER_SIZE];
+ int mark;
+} extended_context_t;
+
+typedef struct {
+ char bumper [offsetof (ucontext_t, uc_stack) + sizeof (stack_t)];
+ extended_context_t * ectx;
+ int mark;
+} vmx_ucontext_t;
+
+/* Determine whether CONTEXT designates a signal handler frame, and if so
+   return the address of the associated ucontext_t.  Return NULL otherwise.  */
+
+static ucontext_t *
+ucontext_for (struct _Unwind_Context *context)
+{
+ const unsigned int * ra = context->ra;
+
+  /* AIX 5.2 and 5.3, threaded or not, share a common code pattern, with
+     variants depending on the configured kernel (unix_mp or unix_64).  */
+
+ if (*(ra - 5) == 0x4c00012c /* isync */
+ && *(ra - 4) == 0x80ec0000 /* lwz r7,0(r12) */
+ && *(ra - 3) == 0x804c0004 /* lwz r2,4(r12) */
+ && *(ra - 2) == 0x7ce903a6 /* mtctr r7 */
+ && *(ra - 1) == 0x4e800421 /* bctrl */
+ && *(ra - 0) == 0x7dc37378) /* mr r3,r14 <-- context->ra */
+ {
+ /* unix_64 */
+ if (*(ra - 6) == 0x7d000164) /* mtmsrd r8 */
+ {
+ switch (*(ra + 18))
+ {
+ /* AIX 5.2 */
+ case 0x835a0520: /* lwz r26,1312(r26) */
+ return (ucontext_t *)(context->cfa + 0x70);
+
+ /* AIX 5.3 */
+ case 0x835a0570: /* lwz r26,1392(r26) */
+ return (ucontext_t *)(context->cfa + 0x40);
+
+ default:
+ return 0;
+ }
+ }
+
+ /* unix_mp */
+ if (*(ra - 6) == 0x7d000124) /* mtmsr r8 */
+ {
+ typedef struct {
+ char pad[56];
+ ucontext_t ucontext;
+ siginfo_t siginfo;
+ } aix52_stack_t;
+
+ aix52_stack_t * frame = (aix52_stack_t *) context->cfa;
+ return &frame->ucontext;
+ }
+ }
+
+ return 0;
+}
+
+/* The fallback proper. */
+
+#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
+#define RETURN_COLUMN DWARF_ALT_FRAME_RETURN_COLUMN
+#else
+#define RETURN_COLUMN ARG_POINTER_REGNUM
+#endif
+
+#define REGISTER_CFA_OFFSET_FOR(FS, REGNO, ADDR, CFA) \
+  do { \
+    (FS)->regs.reg[REGNO].how = REG_SAVED_OFFSET; \
+    (FS)->regs.reg[REGNO].loc.offset = (long) (ADDR) - (CFA); \
+  } while (0)
+
+static _Unwind_Reason_Code
+ppc_aix_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ ucontext_t * uctx = ucontext_for (context);
+ mstate_t * mctx;
+
+ long new_cfa;
+ int i;
+
+ if (uctx == NULL)
+ return _URC_END_OF_STACK;
+
+ mctx = &uctx->uc_mcontext.jmp_context;
+
+  /* The "kernel" frame CFA is the stack pointer at the point where the
+     signal occurred.  */
+ new_cfa = mctx->gpr[STACK_POINTER_REGNUM];
+
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = STACK_POINTER_REGNUM;
+ fs->regs.cfa_offset = new_cfa - (long) context->cfa;
+
+  /* And we state how to find the various registers the kernel has saved,
+     as offset rules relative to that CFA.  */
+
+ for (i = 0; i < 32; i++)
+ if (i != STACK_POINTER_REGNUM)
+ REGISTER_CFA_OFFSET_FOR (fs, i, &mctx->gpr[i], new_cfa);
+
+ REGISTER_CFA_OFFSET_FOR (fs, CR2_REGNO, &mctx->cr, new_cfa);
+ REGISTER_CFA_OFFSET_FOR (fs, XER_REGNO, &mctx->xer, new_cfa);
+ REGISTER_CFA_OFFSET_FOR (fs, LR_REGNO, &mctx->lr, new_cfa);
+
+ fs->retaddr_column = RETURN_COLUMN;
+ REGISTER_CFA_OFFSET_FOR (fs, RETURN_COLUMN, &mctx->iar, new_cfa);
+ fs->signal_frame = 1;
+
+  /* Honor the FP Ever Used flag: only describe the FP registers when the
+     interrupted code actually used floating point.  */
+ if (mctx->fpeu)
+ {
+ for (i = 0; i < 32; i++)
+ REGISTER_CFA_OFFSET_FOR (fs, i+32, &mctx->fpr[i], new_cfa);
+ }
+
+  /* Honor the VMX context, if any.  We expect the MSR bit never to be set
+     in environments without VMX support, e.g. on AIX < 5.3.  */
+ if (mctx->msr & MSR_VMX)
+ {
+ vmx_ucontext_t * uc = (vmx_ucontext_t *) uctx;
+
+ if (uc->mark == EXT_CONTEXT_MARK && uc->ectx->mark == EXT_CONTEXT_MARK)
+ {
+ vstate_t * vstate = &uc->ectx->vstate;
+
+ for (i = 0; i < 32; i++)
+ REGISTER_CFA_OFFSET_FOR
+ (fs, i+FIRST_ALTIVEC_REGNO, &vstate->regs[i], new_cfa);
+
+ REGISTER_CFA_OFFSET_FOR (fs, VSCR_REGNO, &vstate->vscr, new_cfa);
+ REGISTER_CFA_OFFSET_FOR (fs, VRSAVE_REGNO, &vstate->vrsave, new_cfa);
+ }
+ }
+
+ return _URC_NO_REASON;
+}
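(Because vstate_t and extended_context_t are laid out by hand to overlay a
kernel-provided EXT_CONTEXT_SIZE-byte area, a compile-time size check is a
cheap safety net.  The sketch below is not part of the patch; it assumes
4-byte alignment throughout, as on 32-bit AIX, and uses the negative-array-size
idiom since the code predates C11 _Static_assert.  The typedef name is
arbitrary.)

/* Fails to compile unless 4 pad words + vstate_t + bumper + mark add up
   to exactly EXT_CONTEXT_SIZE bytes.  */
typedef char verify_ext_context_size
  [sizeof (extended_context_t) == EXT_CONTEXT_SIZE ? 1 : -1];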
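(Finally, a minimal self-contained illustration of the arithmetic that
REGISTER_CFA_OFFSET_FOR records and that the unwinder later inverts: a
register's saved value is found at CFA + offset, which in the fallback above
points straight into the kernel-saved machine state.  Nothing below is an
AIX definition; the struct and names are invented for the demo.)

#include <stdio.h>

/* Hypothetical miniature machine-state block, standing in for the AIX
   mstsave / __context64 layouts used by the real code.  */
struct mini_mstate { long gpr[32]; long lr; };

int
main (void)
{
  struct mini_mstate mctx;
  mctx.gpr[14] = 0x1234;

  /* The fallback uses the interrupted stack pointer as the new CFA;
     any base address illustrates the arithmetic equally well.  */
  long cfa = (long) &mctx;

  /* What a REG_SAVED_OFFSET rule stores for r14 ...  */
  long offset = (long) &mctx.gpr[14] - cfa;

  /* ... and how the unwinder reads the saved register back.  */
  long restored = *(long *) (cfa + offset);

  printf ("r14 recovered as 0x%lx\n", restored);
  return 0;
}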