Diffstat (limited to 'src/base/linux_syscall_support.h')
-rw-r--r--  src/base/linux_syscall_support.h  206
1 file changed, 201 insertions, 5 deletions
diff --git a/src/base/linux_syscall_support.h b/src/base/linux_syscall_support.h
index d6899b8..b807b11 100644
--- a/src/base/linux_syscall_support.h
+++ b/src/base/linux_syscall_support.h
@@ -131,12 +131,12 @@
#define SYS_LINUX_SYSCALL_SUPPORT_H
/* We currently only support x86-32, x86-64, ARM, MIPS, PPC/PPC64, Aarch64,
- * s390, s390x, and riscv64 on Linux.
+ * s390, s390x, riscv64, and loongarch64 on Linux.
* Porting to other related platforms should not be difficult.
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__mips64) || defined(__mips64el__) || defined(__PPC__) || \
- defined(__aarch64__) || defined(__s390__) || defined(__riscv)) \
+ defined(__aarch64__) || defined(__s390__) || defined(__riscv) || defined(__loongarch64)) \
&& (defined(__linux))
#ifndef SYS_CPLUSPLUS
@@ -541,6 +541,31 @@ struct kernel_stat {
unsigned long __unused4;
unsigned long __unused5;
};
+
+/* From linux/include/uapi/asm-generic/stat.h */
+#elif defined(__loongarch64)
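+/* The trailing underscore on the time fields keeps them from colliding with
+ * the st_atime/st_mtime/st_ctime macros that <sys/stat.h> may define. */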
+struct kernel_stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned long st_rdev;
+ unsigned long __pad1;
+ long st_size;
+ int st_blksize;
+ int __pad2;
+ long st_blocks;
+ long st_atime_;
+ unsigned long st_atime_nsec_;
+ long st_mtime_;
+ unsigned long st_mtime_nsec_;
+ long st_ctime_;
+ unsigned long st_ctime_nsec_;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
#endif
@@ -954,8 +979,24 @@ struct kernel_stat {
# ifndef __NR_fstatat
# define __NR_fstatat 79
# endif
-#endif
+#elif defined(__loongarch64)
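+/* loongarch64 uses the asm-generic syscall table, so these numbers match the
+ * generic values in include/uapi/asm-generic/unistd.h. */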
+# ifndef __NR_gettid
+# define __NR_gettid 178
+# endif
+# ifndef __NR_futex
+# define __NR_futex 98
+# endif
+# ifndef __NR_openat
+# define __NR_openat 56
+# endif
+# ifndef __NR_fstatat
+# define __NR_fstatat 79
+# endif
+# ifndef __NR_getdents64
+# define __NR_getdents64 61
+# endif /* End of loongarch64 definitions */
+#endif
/* After forking, we must make sure to only call system calls. */
#if __BOUNDED_POINTERS__
@@ -1017,7 +1058,8 @@ struct kernel_stat {
#undef LSS_RETURN
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
- defined(__aarch64__) || defined(__s390__) || defined(__riscv))
+ defined(__aarch64__) || defined(__s390__) || defined(__riscv) || \
+ defined(__loongarch64))
/* Failing system calls return a negative result in the range of
* -1..-4095. These are "errno" values with the sign inverted.
*/
@@ -2564,6 +2606,159 @@ struct kernel_stat {
LSS_BODY(type, name, "r"(__a1), "r"(__a2), "r"(__a3), "r"(__a4), \
"r"(__a5)); \
}
+
+ #elif defined(__loongarch64)
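+ /* LoongArch64 syscall convention (summarized here for reference): the
+  * syscall number is passed in $a7, arguments in $a0..$a6, and the result
+  * comes back in $a0, which is why LSS_BODY uses a "+r"(__a0) operand. The
+  * kernel is assumed to clobber only the temporaries $t0..$t8. */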
+ #undef LSS_REG
+ #define LSS_REG(r,a) register long __a##r __asm__("$a"#r) = (long)a
+ #define LOONGARCH__SYSCALL_CLOBBERS \
+ "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", \
+ "memory"
+ #undef LSS_BODY
+ #define LSS_BODY(type,name,args...) \
+ register long __a7 __asm__("$a7") = __NR_##name; \
+ long __res; \
+ __asm__ __volatile__ ( \
+ "syscall 0\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7), ##args \
+ : LOONGARCH__SYSCALL_CLOBBERS); \
+ __res = __a0; \
+ LSS_RETURN(type, __res)
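+ /* _syscall0 below cannot reuse LSS_BODY: LSS_BODY expects __a0 to already
+  * be bound as an input/output operand, but with no arguments $a0 is purely
+  * an output. */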
+ #undef _syscall0
+ #define _syscall0(type,name) \
+ type LSS_NAME(name)(void) { \
+ register long __a7 __asm__("$a7") = __NR_##name; \
+ register long __a0 __asm__("$a0"); \
+ long __res; \
+ __asm__ __volatile__ ( \
+ "syscall 0\n\t" \
+ : "=r" (__a0) \
+ : "r" (__a7) \
+ : LOONGARCH__SYSCALL_CLOBBERS); \
+ __res = __a0; \
+ LSS_RETURN(type, __res); \
+ }
+ #undef _syscall1
+ #define _syscall1(type,name,type1,arg1) \
+ type LSS_NAME(name)(type1 arg1) { \
+ LSS_REG(0, arg1); \
+ LSS_BODY(type,name); \
+ }
+ #undef _syscall2
+ #define _syscall2(type,name,type1,arg1,type2,arg2) \
+ type LSS_NAME(name)(type1 arg1, type2 arg2) { \
+ LSS_REG(0,arg1); \
+ LSS_REG(1,arg2); \
+ LSS_BODY(type, name,"r"(__a1)); \
+ }
+ #undef _syscall3
+ #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+ type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3) { \
+ LSS_REG(0,arg1); \
+ LSS_REG(1,arg2); \
+ LSS_REG(2,arg3); \
+ LSS_BODY(type, name,"r"(__a1), "r"(__a2)); \
+ }
+ #undef _syscall4
+ #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \
+ type4,arg4) \
+ type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3,type4 arg4) { \
+ LSS_REG(0,arg1); \
+ LSS_REG(1,arg2); \
+ LSS_REG(2,arg3); \
+ LSS_REG(3,arg4); \
+ LSS_BODY(type,name, "r"(__a1), "r"(__a2), "r"(__a3)); \
+ }
+ #undef _syscall5
+ #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \
+ type4,arg4,type5,arg5) \
+ type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3,type4 arg4, \
+ type5 arg5) { \
+ LSS_REG(0,arg1); \
+ LSS_REG(1,arg2); \
+ LSS_REG(2,arg3); \
+ LSS_REG(3,arg4); \
+ LSS_REG(4,arg5); \
+ LSS_BODY(type,name,"r"(__a1), "r"(__a2), "r"(__a3), "r"(__a4)); \
+ }
+ #undef _syscall6
+ #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3, \
+ type4,arg4,type5,arg5,type6,arg6) \
+ type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3,type4 arg4, \
+ type5 arg5,type6 arg6) { \
+ LSS_REG(0,arg1); \
+ LSS_REG(1,arg2); \
+ LSS_REG(2,arg3); \
+ LSS_REG(3,arg4); \
+ LSS_REG(4,arg5); \
+ LSS_REG(5,arg6); \
+ LSS_BODY(type,name,"r"(__a1), "r"(__a2), "r"(__a3), "r"(__a4), \
+ "r"(__a5)); \
+ }
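+ /* For illustration only (hypothetical syscall "foo", not defined by this
+  * header): with the default LSS_NAME prefix,
+  *   _syscall2(int, foo, int, a, int, b)
+  * expands to roughly
+  *   int sys_foo(int a, int b) {
+  *     register long __a0 __asm__("$a0") = (long)a;
+  *     register long __a1 __asm__("$a1") = (long)b;
+  *     register long __a7 __asm__("$a7") = __NR_foo;
+  *     long __res;
+  *     __asm__ __volatile__("syscall 0\n\t"
+  *                          : "+r"(__a0)
+  *                          : "r"(__a7), "r"(__a1)
+  *                          : LOONGARCH__SYSCALL_CLOBBERS);
+  *     __res = __a0;
+  *     LSS_RETURN(int, __res);
+  *   }
+  */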
+
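+ /* A clone() implementation sketch for loongarch64, following the same
+  * sequence as the other 64-bit ports in this file: stash fn and arg on the
+  * child stack, issue the raw clone syscall, then have the child reload
+  * them, call fn(arg), and _exit with its return value. */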
+ LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *arg), void *child_stack,
+ int flags, void *arg, int *parent_tidptr,
+ void *newtls, int *child_tidptr) {
+ long __res;
+ {
+ register int (*__fn)(void *) __asm__("$a0") = fn;
+ register void *__stack __asm__("$a1") = child_stack;
+ register int __flags __asm__("$a2") = flags;
+ register void *__arg __asm__("$a3") = arg;
+ register int *__ptid __asm__("$a4") = parent_tidptr;
+ register void *__tls __asm__("$a5") = newtls;
+ register int *__ctid __asm__("$a6") = child_tidptr;
+ __asm__ __volatile__(
+ /* Align the child stack to 16 bytes, per the LoongArch LP64 ABI. */
+ "bstrins.d $a1, $zero, 3, 0\n"
+ /* Sanity check arguments. */
+ "beqz $a0, 1f\n" /* No NULL function pointers. */
+ "beqz $a1, 1f\n" /* No NULL stack pointers. */
+
+ /* Save the function pointer and its argument on the child stack. */
+ "addi.d $a1, $a1, -16\n" /* Reserve argument save space. */
+ "st.d $a0, $a1, 0\n" /* Save function pointer. */
+ "st.d $a3, $a1, 8\n" /* Save argument pointer. */
+
+ /* The syscall expects the args to be in different slots. */
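+ /* After these moves: $a0 = flags, $a1 = stack, $a2 = parent_tidptr,
+  * $a3 = child_tidptr, $a4 = tls, matching the kernel's clone() order. */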
+ "or $a0, $a2, $zero\n"
+ "or $a2, $a4, $zero\n"
+ "or $a3, $a6, $zero\n"
+ "or $a4, $a5, $zero\n"
+ /* Do the system call */
+ "li.d $a7, %9\n"
+ "syscall 0\n"
+
+ "bnez $a5, 2f\n"
+ "bnez $a7, 2f\n"
+
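+ /* In the child now; $sp is the adjusted child stack passed to clone. */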
+ "ld.d $a1, $sp, 0\n" /* Function pointer. */
+ "ld.d $a0, $sp, 8\n" /* Argument pointer. */
+
+ /* Call the user's function. */
+ "jirl $ra, $a1, 0\n"
+
+ /* Call exit with the function's return value. */
+ "li.d $a0, %10\n"
+ "syscall 0\n"
+
+ "1:\n"
+ "li.d $t0, %1\n"
+ "or $a0, $t0, $zero\n"
+
+ "2:\n"
+
+ : "=r" (__res)
+ : "i"(-EINVAL),
+ "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid),
+ "i"(__NR_clone), "i"(__NR_exit)
+ : "memory");
+ }
+ LSS_RETURN(int, __res);
+ }
+
#endif
#define __NR__exit __NR_exit
#define __NR__gettid __NR_gettid
@@ -2657,7 +2852,7 @@ struct kernel_stat {
LSS_INLINE _syscall3(long, getcpu, unsigned *, cpu,
unsigned *, node, void *, unused);
#endif
- #if defined(__x86_64__) || defined(__aarch64__) || \
+ #if defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch64) || \
(defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
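+ /* loongarch64 has no __NR_socketcall, so socket() is issued directly via
+  * __NR_socket here, as on the other 64-bit targets above. */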
LSS_INLINE _syscall3(int, socket, int, d,
int, t, int, p)
@@ -2691,6 +2886,7 @@ struct kernel_stat {
}
#endif
#if (defined(__aarch64__)) || \
+ (defined(__loongarch64)) || \
(defined(__mips__) \
&& (_MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32))
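+ /* Like aarch64, loongarch64 only provides rt_sigaction, so sigaction() is
+  * emulated on top of it for this target as well. */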
LSS_INLINE int LSS_NAME(sigaction)(int signum,