From f259a6f6deff9bdc7dd998b8b6bdbb4c166b33c3 Mon Sep 17 00:00:00 2001
From: zhangwenlong
Date: Wed, 25 May 2022 09:31:08 +0800
Subject: Support loongarch64 (#678)

* update config.{guess,sub}

* Support loongarch64

Co-Authored-By: Cheng Lulu
Co-Authored-By: Xi Ruoyao
Co-Authored-By: Xu Hao
Co-Authored-By: Zhang Wenlong
Co-Authored-By: Pan Xuefeng
Co-authored-by: panxuefeng
Co-authored-by: Cheng Lulu
Co-authored-by: Xi Ruoyao
---
 src/loongarch64/ffi.c       | 595 ++++++++++++++++++++++++++++++++++++++++++++
 src/loongarch64/ffitarget.h |  82 ++++++
 src/loongarch64/sysv.S      | 296 ++++++++++++++++++++++
 3 files changed, 973 insertions(+)
 create mode 100644 src/loongarch64/ffi.c
 create mode 100644 src/loongarch64/ffitarget.h
 create mode 100644 src/loongarch64/sysv.S

diff --git a/src/loongarch64/ffi.c b/src/loongarch64/ffi.c
new file mode 100644
index 0000000..7a28892
--- /dev/null
+++ b/src/loongarch64/ffi.c
@@ -0,0 +1,595 @@
+/* -----------------------------------------------------------------------
+   ffi.c - Copyright (c) 2022 Xu Chenghua
+                         2022 Cheng Lulu
+   Based on RISC-V port
+
+   LoongArch Foreign Function Interface
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   ``Software''), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be included
+   in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+   WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+   DEALINGS IN THE SOFTWARE.
+   ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#if defined(__loongarch_soft_float)
+# define ABI_FRLEN 0
+#elif defined(__loongarch_single_float)
+# define ABI_FRLEN 32
+# define ABI_FLOAT float
+#elif defined(__loongarch_double_float)
+# define ABI_FRLEN 64
+# define ABI_FLOAT double
+#else
+#error unsupported LoongArch floating-point ABI
+#endif
+
+#define NARGREG 8
+#define STKALIGN 16
+#define MAXCOPYARG (2 * sizeof (double))
+
+/* call_context registers
+   - 8 floating point parameter/result registers.
+   - 8 integer parameter/result registers.
+   - 2 registers used by the assembly code to in-place construct its own
+     stack frame:
+     - frame register
+     - return register
+*/
+typedef struct call_context
+{
+  ABI_FLOAT fa[8];
+  size_t a[10];
+} call_context;
+
+typedef struct call_builder
+{
+  call_context *aregs;
+  int used_integer;
+  int used_float;
+  size_t *used_stack;
+  size_t *stack;
+  size_t next_struct_area;
+} call_builder;
+
+/* Integer (not pointer) less than ABI GRLEN.  */
+/* FFI_TYPE_INT does not appear to be used. 
*/ +#if __SIZEOF_POINTER__ == 8 +# define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64) +#else +# define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32) +#endif + +#if ABI_FRLEN +typedef struct float_struct_info +{ + char as_elements; + char type1; + char offset2; + char type2; +} float_struct_info; + +#if ABI_FRLEN >= 64 +# define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE) +#else +# define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT) +#endif + +static ffi_type ** +flatten_struct (ffi_type *in, ffi_type **out, ffi_type **out_end) +{ + int i; + + if (out == out_end) + return out; + if (in->type != FFI_TYPE_STRUCT) + *(out++) = in; + else + for (i = 0; in->elements[i]; i++) + out = flatten_struct (in->elements[i], out, out_end); + return out; +} + +/* Structs with at most two fields after flattening, one of which is of + floating point type, are passed in multiple registers if sufficient + registers are available. */ +static float_struct_info +struct_passed_as_elements (call_builder *cb, ffi_type *top) +{ + float_struct_info ret = {0, 0, 0, 0}; + ffi_type *fields[3]; + int num_floats, num_ints; + int num_fields = flatten_struct (top, fields, fields + 3) - fields; + + if (num_fields == 1) + { + if (IS_FLOAT (fields[0]->type)) + { + ret.as_elements = 1; + ret.type1 = fields[0]->type; + } + } + else if (num_fields == 2) + { + num_floats = IS_FLOAT (fields[0]->type) + IS_FLOAT (fields[1]->type); + num_ints = IS_INT (fields[0]->type) + IS_INT (fields[1]->type); + if (num_floats == 0 || num_floats + num_ints != 2) + return ret; + if (cb->used_float + num_floats > NARGREG + || cb->used_integer + (2 - num_floats) > NARGREG) + return ret; + if (!IS_FLOAT (fields[0]->type) && !IS_FLOAT (fields[1]->type)) + return ret; + + ret.type1 = fields[0]->type; + ret.type2 = fields[1]->type; + ret.offset2 = FFI_ALIGN (fields[0]->size, fields[1]->alignment); + ret.as_elements = 1; + } + return ret; +} +#endif + +/* Allocates a single register, float register, or GRLEN-sized stack slot to a + datum. */ +static void +marshal_atom (call_builder *cb, int type, void *data) +{ + size_t value = 0; + switch (type) + { + case FFI_TYPE_UINT8: + value = *(uint8_t *) data; + break; + case FFI_TYPE_SINT8: + value = *(int8_t *) data; + break; + case FFI_TYPE_UINT16: + value = *(uint16_t *) data; + break; + case FFI_TYPE_SINT16: + value = *(int16_t *) data; + break; + /* 32-bit quantities are always sign-extended in the ABI. 
*/ + case FFI_TYPE_UINT32: + value = *(int32_t *) data; + break; + case FFI_TYPE_SINT32: + value = *(int32_t *) data; + break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: + value = *(uint64_t *) data; + break; + case FFI_TYPE_SINT64: + value = *(int64_t *) data; + break; +#endif + case FFI_TYPE_POINTER: + value = *(size_t *) data; + break; + +#if ABI_FRLEN >= 32 + case FFI_TYPE_FLOAT: + *(float *)(cb->aregs->fa + cb->used_float++) = *(float *) data; + return; +#endif +#if ABI_FRLEN >= 64 + case FFI_TYPE_DOUBLE: + (cb->aregs->fa[cb->used_float++]) = *(double *) data; + return; +#endif + default: + FFI_ASSERT (0); + break; + } + + if (cb->used_integer == NARGREG) + *cb->used_stack++ = value; + else + cb->aregs->a[cb->used_integer++] = value; +} + +static void +unmarshal_atom (call_builder *cb, int type, void *data) +{ + size_t value; + switch (type) + { +#if ABI_FRLEN >= 32 + case FFI_TYPE_FLOAT: + *(float *) data = *(float *)(cb->aregs->fa + cb->used_float++); + return; +#endif +#if ABI_FRLEN >= 64 + case FFI_TYPE_DOUBLE: + *(double *) data = cb->aregs->fa[cb->used_float++]; + return; +#endif + } + + if (cb->used_integer == NARGREG) + value = *cb->used_stack++; + else + value = cb->aregs->a[cb->used_integer++]; + + switch (type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#endif + case FFI_TYPE_POINTER: + *(ffi_arg *)data = value; + break; + default: + FFI_ASSERT (0); + break; + } +} + +/* Allocate and copy a structure that is passed by value on the stack and + return a pointer to it. */ +static void * +allocate_and_copy_struct_to_stack (call_builder *cb, void *data, + ffi_type *type) +{ + size_t dest = cb->next_struct_area - type->size; + + dest = FFI_ALIGN_DOWN (dest, type->alignment); + cb->next_struct_area = dest; + + return memcpy ((char *)cb->stack + dest, data, type->size); +} + +/* Adds an argument to a call, or a not by reference return value. */ +static void +marshal (call_builder *cb, ffi_type *type, int var, void *data) +{ + size_t realign[2]; + +#if ABI_FRLEN + if (!var && type->type == FFI_TYPE_STRUCT) + { + float_struct_info fsi = struct_passed_as_elements (cb, type); + if (fsi.as_elements) + { + marshal_atom (cb, fsi.type1, data); + if (fsi.offset2) + marshal_atom (cb, fsi.type2, ((char *) data) + fsi.offset2); + return; + } + } + + if (!var && cb->used_float < NARGREG + && IS_FLOAT (type->type)) + { + marshal_atom (cb, type->type, data); + return; + } + + double promoted; + if (var && type->type == FFI_TYPE_FLOAT) + { + /* C standard requires promoting float -> double for variable arg. */ + promoted = *(float *) data; + type = &ffi_type_double; + data = &promoted; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) + /* Pass by reference. */ + { + allocate_and_copy_struct_to_stack (cb, data, type); + data = (char *)cb->stack + cb->next_struct_area; + marshal_atom (cb, FFI_TYPE_POINTER, &data); + } + else if (IS_INT (type->type) || type->type == FFI_TYPE_POINTER) + marshal_atom (cb, type->type, data); + else + { + /* Overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on. */ + + /* Variadics are aligned even in registers. 
*/ + if (type->alignment > __SIZEOF_POINTER__) + { + if (var) + cb->used_integer = FFI_ALIGN (cb->used_integer, 2); + cb->used_stack + = (size_t *) FFI_ALIGN (cb->used_stack, 2 * __SIZEOF_POINTER__); + } + + memcpy (realign, data, type->size); + if (type->size > 0) + marshal_atom (cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + marshal_atom (cb, FFI_TYPE_POINTER, realign + 1); + } +} + +/* For arguments passed by reference returns the pointer, otherwise the arg + is copied (up to MAXCOPYARG bytes). */ +static void * +unmarshal (call_builder *cb, ffi_type *type, int var, void *data) +{ + size_t realign[2]; + void *pointer; + +#if ABI_FRLEN + if (!var && type->type == FFI_TYPE_STRUCT) + { + float_struct_info fsi = struct_passed_as_elements (cb, type); + if (fsi.as_elements) + { + unmarshal_atom (cb, fsi.type1, data); + if (fsi.offset2) + unmarshal_atom (cb, fsi.type2, ((char *) data) + fsi.offset2); + return data; + } + } + + if (!var && cb->used_float < NARGREG + && IS_FLOAT (type->type)) + { + unmarshal_atom (cb, type->type, data); + return data; + } + + if (var && type->type == FFI_TYPE_FLOAT) + { + int m = cb->used_integer; + void *promoted + = m < NARGREG ? cb->aregs->a + m : cb->used_stack + m - NARGREG + 1; + *(float *) promoted = *(double *) promoted; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) + { + /* Pass by reference. */ + unmarshal_atom (cb, FFI_TYPE_POINTER, (char *) &pointer); + return pointer; + } + else if (IS_INT (type->type) || type->type == FFI_TYPE_POINTER) + { + unmarshal_atom (cb, type->type, data); + return data; + } + else + { + /* Overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on. */ + + /* Variadics are aligned even in registers. */ + if (type->alignment > __SIZEOF_POINTER__) + { + if (var) + cb->used_integer = FFI_ALIGN (cb->used_integer, 2); + cb->used_stack + = (size_t *) FFI_ALIGN (cb->used_stack, 2 * __SIZEOF_POINTER__); + } + + if (type->size > 0) + unmarshal_atom (cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + unmarshal_atom (cb, FFI_TYPE_POINTER, realign + 1); + memcpy (data, realign, type->size); + return data; + } +} + +static int +passed_by_ref (call_builder *cb, ffi_type *type, int var) +{ +#if ABI_FRLEN + if (!var && type->type == FFI_TYPE_STRUCT) + { + float_struct_info fsi = struct_passed_as_elements (cb, type); + if (fsi.as_elements) + return 0; + } +#endif + + return type->size > 2 * __SIZEOF_POINTER__; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + cif->loongarch_nfixedargs = cif->nargs; + return FFI_OK; +} + +/* Perform machine dependent cif processing when we have a variadic + function. */ +ffi_status +ffi_prep_cif_machdep_var (ffi_cif *cif, unsigned int nfixedargs, + unsigned int ntotalargs) +{ + cif->loongarch_nfixedargs = nfixedargs; + return FFI_OK; +} + +/* Low level routine for calling functions. 
*/ +extern void ffi_call_asm (void *stack, struct call_context *regs, + void (*fn) (void), void *closure) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + /* This is a conservative estimate, assuming a complex return value and + that all remaining arguments are long long / __int128 */ + size_t arg_bytes = cif->bytes; + size_t rval_bytes = 0; + if (rvalue == NULL && cif->rtype->size > 2 * __SIZEOF_POINTER__) + rval_bytes = FFI_ALIGN (cif->rtype->size, STKALIGN); + size_t alloc_size = arg_bytes + rval_bytes + sizeof (call_context); + + /* The assembly code will deallocate all stack data at lower addresses + than the argument region, so we need to allocate the frame and the + return value after the arguments in a single allocation. */ + size_t alloc_base; + /* Argument region must be 16-byte aligned in LP64 ABIs. */ + if (_Alignof(max_align_t) >= STKALIGN) + /* Since sizeof long double is normally 16, the compiler will + guarantee alloca alignment to at least that much. */ + alloc_base = (size_t) alloca (alloc_size); + else + alloc_base = FFI_ALIGN (alloca (alloc_size + STKALIGN - 1), STKALIGN); + + if (rval_bytes) + rvalue = (void *) (alloc_base + arg_bytes); + + call_builder cb; + cb.used_float = cb.used_integer = 0; + cb.aregs = (call_context *) (alloc_base + arg_bytes + rval_bytes); + cb.used_stack = (void *) alloc_base; + cb.stack = (void *) alloc_base; + cb.next_struct_area = arg_bytes; + + int return_by_ref = passed_by_ref (&cb, cif->rtype, 0); + if (return_by_ref) + cb.aregs->a[cb.used_integer++] = (size_t)rvalue; + + int i; + for (i = 0; i < cif->nargs; i++) + marshal (&cb, cif->arg_types[i], i >= cif->loongarch_nfixedargs, + avalue[i]); + + ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure); + + cb.used_float = cb.used_integer = 0; + if (!return_by_ref && rvalue) + unmarshal (&cb, cif->rtype, 0, rvalue); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +extern void ffi_closure_asm (void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) &closure->tramp[0]; + uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm; + + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* We will call ffi_closure_inner with codeloc, not closure, but as long + as the memory is readable it should work. */ + tramp[0] = 0x1800000c; /* pcaddi $t0, 0 (i.e. 
$t0 <- tramp) */ + tramp[1] = 0x28c0418d; /* ld.d $t1, $t0, 16 */ + tramp[2] = 0x4c0001a0; /* jirl $zero, $t1, 0 */ + tramp[3] = 0x03400000; /* nop */ + tramp[4] = fn; + tramp[5] = fn >> 32; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + __builtin___clear_cache (codeloc, codeloc + FFI_TRAMPOLINE_SIZE); + return FFI_OK; +} + +extern void ffi_go_closure_asm (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + closure->tramp = (void *) ffi_go_closure_asm; + closure->cif = cif; + closure->fun = fun; + return FFI_OK; +} + +/* Called by the assembly code with aregs pointing to saved argument registers + and stack pointing to the stacked arguments. Return values passed in + registers will be reloaded from aregs. */ +void FFI_HIDDEN +ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, size_t *stack, call_context *aregs) +{ + void **avalue = alloca (cif->nargs * sizeof (void *)); + /* Storage for arguments which will be copied by unmarshal(). We could + theoretically avoid the copies in many cases and use at most 128 bytes + of memory, but allocating disjoint storage for each argument is + simpler. */ + char *astorage = alloca (cif->nargs * MAXCOPYARG); + void *rvalue; + call_builder cb; + int return_by_ref; + int i; + + cb.aregs = aregs; + cb.used_integer = cb.used_float = 0; + cb.used_stack = stack; + + return_by_ref = passed_by_ref (&cb, cif->rtype, 0); + if (return_by_ref) + unmarshal (&cb, &ffi_type_pointer, 0, &rvalue); + else + rvalue = alloca (cif->rtype->size); + + for (i = 0; i < cif->nargs; i++) + avalue[i] + = unmarshal (&cb, cif->arg_types[i], i >= cif->loongarch_nfixedargs, + astorage + i * MAXCOPYARG); + + fun (cif, rvalue, avalue, user_data); + + if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) + { + cb.used_integer = cb.used_float = 0; + marshal (&cb, cif->rtype, 0, rvalue); + } +} diff --git a/src/loongarch64/ffitarget.h b/src/loongarch64/ffitarget.h new file mode 100644 index 0000000..5a4698a --- /dev/null +++ b/src/loongarch64/ffitarget.h @@ -0,0 +1,82 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2022 Xu Chenghua + 2022 Cheng Lulu + + Target configuration macros for LoongArch. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+
+   ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error \
+  "Please do not include ffitarget.h directly into your source.  Use ffi.h instead."
+#endif
+
+#ifndef __loongarch__
+#error \
+  "libffi was configured for a LoongArch target but this does not appear to be a LoongArch compiler."
+#endif
+
+#ifndef LIBFFI_ASM
+
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi
+{
+  FFI_FIRST_ABI = 0,
+  FFI_LP64S,
+  FFI_LP64F,
+  FFI_LP64D,
+  FFI_LAST_ABI,
+
+#if defined(__loongarch64)
+#if defined(__loongarch_soft_float)
+  FFI_DEFAULT_ABI = FFI_LP64S
+#elif defined(__loongarch_single_float)
+  FFI_DEFAULT_ABI = FFI_LP64F
+#elif defined(__loongarch_double_float)
+  FFI_DEFAULT_ABI = FFI_LP64D
+#else
+#error unsupported LoongArch floating-point ABI
+#endif
+#else
+#error unsupported LoongArch base architecture
+#endif
+} ffi_abi;
+
+#endif /* LIBFFI_ASM */
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 24
+#define FFI_NATIVE_RAW_API 0
+#define FFI_EXTRA_CIF_FIELDS \
+  unsigned loongarch_nfixedargs; \
+  unsigned loongarch_unused;
+#define FFI_TARGET_SPECIFIC_VARIADIC
+#endif
diff --git a/src/loongarch64/sysv.S b/src/loongarch64/sysv.S
new file mode 100644
index 0000000..9e0da11
--- /dev/null
+++ b/src/loongarch64/sysv.S
@@ -0,0 +1,296 @@
+/* -----------------------------------------------------------------------
+   sysv.S - Copyright (c) 2022 Xu Chenghua
+                          2022 Cheng Lulu
+
+   LoongArch Foreign Function Interface
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   ``Software''), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be included
+   in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+   HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+   WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+   DEALINGS IN THE SOFTWARE.
+   ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+/* Define aliases so that we can handle all ABIs uniformly. 
*/
+
+#if __SIZEOF_POINTER__ == 8
+# define PTRS 8
+# define LARG ld.d
+# define SARG st.d
+#else
+# define PTRS 4
+# define LARG ld.w
+# define SARG st.w
+#endif
+
+#if defined(__loongarch_single_float)
+# define FLTS 4
+# define FLARG fld.s
+# define FSARG fst.s
+#elif defined(__loongarch_double_float)
+# define FLTS 8
+# define FLARG fld.d
+# define FSARG fst.d
+#elif defined(__loongarch_soft_float)
+# define FLTS 0
+#else
+#error unsupported LoongArch floating-point ABI
+#endif
+
+	.text
+	.globl	ffi_call_asm
+	.type	ffi_call_asm, @function
+	.hidden	ffi_call_asm
+/* struct call_context
+   {
+     ABI_FLOAT fa[8];
+     size_t a[10];
+   }
+
+   - 8 floating point parameter/result registers (fa[0] - fa[7])
+   - 8 integer parameter/result registers (a[0] - a[7])
+   - 2 registers used by the assembly code to in-place construct its own stack
+     frame:
+     - frame pointer (a[8])
+     - return address (a[9])
+
+   void ffi_call_asm (size_t *stackargs, struct call_context *regargs,
+		      void (*fn)(void), void *closure);  */
+
+#define FRAME_LEN (8 * FLTS + 10 * PTRS)
+
+ffi_call_asm:
+	.cfi_startproc
+
+	/* We are NOT going to set up an ordinary stack frame.  In order to
+	   pass the stacked args to the called function, we adjust our stack
+	   pointer to a0, which is in the _caller's_ alloca area.  We
+	   establish our own stack frame at the end of the call_context.
+
+	   Anything below the arguments will be freed at this point, although
+	   we preserve the call_context so that it can be read back in the
+	   caller.  */
+
+	.cfi_def_cfa 5, FRAME_LEN	# Interim CFA based on a1.
+	SARG	$fp, $a1, FRAME_LEN - 2*PTRS
+	.cfi_offset 22, -2*PTRS
+	SARG	$ra, $a1, FRAME_LEN - 1*PTRS
+	.cfi_offset 1, -1*PTRS
+
+	addi.d	$fp, $a1, FRAME_LEN
+	move	$sp, $a0
+	.cfi_def_cfa 22, 0	# Our frame is fully set up.
+
+	# Load arguments.
+	move	$t1, $a2
+	move	$t2, $a3
+
+#if FLTS
+	FLARG	$fa0, $fp, -FRAME_LEN+0*FLTS
+	FLARG	$fa1, $fp, -FRAME_LEN+1*FLTS
+	FLARG	$fa2, $fp, -FRAME_LEN+2*FLTS
+	FLARG	$fa3, $fp, -FRAME_LEN+3*FLTS
+	FLARG	$fa4, $fp, -FRAME_LEN+4*FLTS
+	FLARG	$fa5, $fp, -FRAME_LEN+5*FLTS
+	FLARG	$fa6, $fp, -FRAME_LEN+6*FLTS
+	FLARG	$fa7, $fp, -FRAME_LEN+7*FLTS
+#endif
+
+	LARG	$a0, $fp, -FRAME_LEN+8*FLTS+0*PTRS
+	LARG	$a1, $fp, -FRAME_LEN+8*FLTS+1*PTRS
+	LARG	$a2, $fp, -FRAME_LEN+8*FLTS+2*PTRS
+	LARG	$a3, $fp, -FRAME_LEN+8*FLTS+3*PTRS
+	LARG	$a4, $fp, -FRAME_LEN+8*FLTS+4*PTRS
+	LARG	$a5, $fp, -FRAME_LEN+8*FLTS+5*PTRS
+	LARG	$a6, $fp, -FRAME_LEN+8*FLTS+6*PTRS
+	LARG	$a7, $fp, -FRAME_LEN+8*FLTS+7*PTRS
+
+	/* Call */
+	jirl	$ra, $t1, 0
+
+#if FLTS
+	/* Save return values - only a0/a1 (fa0/fa1) are used.  */
+	FSARG	$fa0, $fp, -FRAME_LEN+0*FLTS
+	FSARG	$fa1, $fp, -FRAME_LEN+1*FLTS
+#endif
+
+	SARG	$a0, $fp, -FRAME_LEN+8*FLTS+0*PTRS
+	SARG	$a1, $fp, -FRAME_LEN+8*FLTS+1*PTRS
+
+	/* Restore and return.  */
+	addi.d	$sp, $fp, -FRAME_LEN
+	.cfi_def_cfa 3, FRAME_LEN
+	LARG	$ra, $fp, -1*PTRS
+	.cfi_restore 1
+	LARG	$fp, $fp, -2*PTRS
+	.cfi_restore 22
+	jr	$ra
+	.cfi_endproc
+	.size	ffi_call_asm, .-ffi_call_asm
+
+
+/* ffi_closure_asm.  Expects address of the passed-in ffi_closure in t0.
+   void ffi_closure_inner (ffi_cif *cif,
+			   void (*fun)(ffi_cif *, void *, void **, void *),
+			   void *user_data,
+			   size_t *stackargs, struct call_context *regargs)  */
+
+	.globl	ffi_closure_asm
+	.hidden	ffi_closure_asm
+	.type	ffi_closure_asm, @function
+
+ffi_closure_asm:
+	.cfi_startproc
+	addi.d	$sp, $sp, -FRAME_LEN
+	.cfi_def_cfa_offset FRAME_LEN
+
+	/* Make a frame. 
*/
+	SARG	$fp, $sp, FRAME_LEN - 2*PTRS
+	.cfi_offset 22, -2*PTRS
+	SARG	$ra, $sp, FRAME_LEN - 1*PTRS
+	.cfi_offset 1, -1*PTRS
+	addi.d	$fp, $sp, FRAME_LEN
+
+	/* Save arguments.  */
+#if FLTS
+	FSARG	$fa0, $sp, 0*FLTS
+	FSARG	$fa1, $sp, 1*FLTS
+	FSARG	$fa2, $sp, 2*FLTS
+	FSARG	$fa3, $sp, 3*FLTS
+	FSARG	$fa4, $sp, 4*FLTS
+	FSARG	$fa5, $sp, 5*FLTS
+	FSARG	$fa6, $sp, 6*FLTS
+	FSARG	$fa7, $sp, 7*FLTS
+#endif
+
+	SARG	$a0, $sp, 8*FLTS+0*PTRS
+	SARG	$a1, $sp, 8*FLTS+1*PTRS
+	SARG	$a2, $sp, 8*FLTS+2*PTRS
+	SARG	$a3, $sp, 8*FLTS+3*PTRS
+	SARG	$a4, $sp, 8*FLTS+4*PTRS
+	SARG	$a5, $sp, 8*FLTS+5*PTRS
+	SARG	$a6, $sp, 8*FLTS+6*PTRS
+	SARG	$a7, $sp, 8*FLTS+7*PTRS
+
+	/* Enter C */
+	LARG	$a0, $t0, FFI_TRAMPOLINE_SIZE+0*PTRS
+	LARG	$a1, $t0, FFI_TRAMPOLINE_SIZE+1*PTRS
+	LARG	$a2, $t0, FFI_TRAMPOLINE_SIZE+2*PTRS
+	addi.d	$a3, $sp, FRAME_LEN
+	move	$a4, $sp
+
+	bl	ffi_closure_inner
+
+	/* Return values.  */
+#if FLTS
+	FLARG	$fa0, $sp, 0*FLTS
+	FLARG	$fa1, $sp, 1*FLTS
+#endif
+
+	LARG	$a0, $sp, 8*FLTS+0*PTRS
+	LARG	$a1, $sp, 8*FLTS+1*PTRS
+
+	/* Restore and return.  */
+	LARG	$ra, $sp, FRAME_LEN-1*PTRS
+	.cfi_restore 1
+	LARG	$fp, $sp, FRAME_LEN-2*PTRS
+	.cfi_restore 22
+	addi.d	$sp, $sp, FRAME_LEN
+	.cfi_def_cfa_offset 0
+	jr	$ra
+	.cfi_endproc
+	.size	ffi_closure_asm, .-ffi_closure_asm
+
+/* ffi_go_closure_asm.  Expects address of the passed-in ffi_go_closure in t2.
+   void ffi_closure_inner (ffi_cif *cif,
+			   void (*fun)(ffi_cif *, void *, void **, void *),
+			   void *user_data,
+			   size_t *stackargs, struct call_context *regargs)  */
+
+	.globl	ffi_go_closure_asm
+	.hidden	ffi_go_closure_asm
+	.type	ffi_go_closure_asm, @function
+
+ffi_go_closure_asm:
+	.cfi_startproc
+	addi.d	$sp, $sp, -FRAME_LEN
+	.cfi_def_cfa_offset FRAME_LEN
+
+	/* Make a frame.  */
+	SARG	$fp, $sp, FRAME_LEN - 2*PTRS
+	.cfi_offset 22, -2*PTRS
+	SARG	$ra, $sp, FRAME_LEN - 1*PTRS
+	.cfi_offset 1, -1*PTRS
+	addi.d	$fp, $sp, FRAME_LEN
+
+	/* Save arguments.  */
+#if FLTS
+	FSARG	$fa0, $sp, 0*FLTS
+	FSARG	$fa1, $sp, 1*FLTS
+	FSARG	$fa2, $sp, 2*FLTS
+	FSARG	$fa3, $sp, 3*FLTS
+	FSARG	$fa4, $sp, 4*FLTS
+	FSARG	$fa5, $sp, 5*FLTS
+	FSARG	$fa6, $sp, 6*FLTS
+	FSARG	$fa7, $sp, 7*FLTS
+#endif
+
+	SARG	$a0, $sp, 8*FLTS+0*PTRS
+	SARG	$a1, $sp, 8*FLTS+1*PTRS
+	SARG	$a2, $sp, 8*FLTS+2*PTRS
+	SARG	$a3, $sp, 8*FLTS+3*PTRS
+	SARG	$a4, $sp, 8*FLTS+4*PTRS
+	SARG	$a5, $sp, 8*FLTS+5*PTRS
+	SARG	$a6, $sp, 8*FLTS+6*PTRS
+	SARG	$a7, $sp, 8*FLTS+7*PTRS
+
+	/* Enter C */
+	LARG	$a0, $t2, 1*PTRS
+	LARG	$a1, $t2, 2*PTRS
+	move	$a2, $t2
+	addi.d	$a3, $sp, FRAME_LEN
+	move	$a4, $sp
+
+	bl	ffi_closure_inner
+
+	/* Return values.  */
+#if FLTS
+	FLARG	$fa0, $sp, 0*FLTS
+	FLARG	$fa1, $sp, 1*FLTS
+#endif
+
+	LARG	$a0, $sp, 8*FLTS+0*PTRS
+	LARG	$a1, $sp, 8*FLTS+1*PTRS
+
+	/* Restore and return.  */
+	LARG	$ra, $sp, FRAME_LEN-1*PTRS
+	.cfi_restore 1
+	LARG	$fp, $sp, FRAME_LEN-2*PTRS
+	.cfi_restore 22
+	addi.d	$sp, $sp, FRAME_LEN
+	.cfi_def_cfa_offset 0
+	jr	$ra
+	.cfi_endproc
+	.size	ffi_go_closure_asm, .-ffi_go_closure_asm
+
+#if defined __ELF__ && defined __linux__
+	.section .note.GNU-stack,"",%progbits
+#endif
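
Usage sketch (not part of the patch above): the port is driven entirely through libffi's ordinary public API, so a caller never touches call_builder or the assembly directly. The minimal, hypothetical example below -- the type `pair' and the function `scale' are invented for illustration -- describes a struct that flattens to {float, int32} and calls through ffi_call. Under the LP64D rules implemented in struct_passed_as_elements above, such a struct travels in one floating-point register plus one general register when enough argument registers remain.

/* Illustrative caller of the LoongArch64 port via libffi's public API.
   `pair' and `scale' are hypothetical names, not part of the commit.  */
#include <ffi.h>
#include <stdio.h>

typedef struct { float x; int y; } pair;

static float scale (pair p) { return p.x * (float) p.y; }

int main (void)
{
  /* Describe `pair' to libffi.  size and alignment start at 0 and are
     computed by ffi_prep_cif from the elements array.  */
  ffi_type *pair_elements[] = { &ffi_type_float, &ffi_type_sint32, NULL };
  ffi_type pair_type = { 0, 0, FFI_TYPE_STRUCT, pair_elements };

  ffi_type *argtypes[] = { &pair_type };
  ffi_cif cif;
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_float, argtypes)
      != FFI_OK)
    return 1;

  pair p = { 2.5f, 4 };
  void *argvalues[] = { &p };    /* avalue holds pointers to each argument */
  float result;

  ffi_call (&cif, FFI_FN (scale), &result, argvalues);
  printf ("%f\n", result);       /* prints 10.000000 */
  return 0;
}

Closures follow the same pattern: ffi_prep_closure_loc fills in the 24-byte trampoline (FFI_TRAMPOLINE_SIZE in ffitarget.h above), whose pcaddi/ld.d/jirl sequence passes the closure address to ffi_closure_asm in t0.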