author     zherczeg <zherczeg@2f5784b3-3f2a-0410-8824-cb99058d5e15>  2016-03-04 09:08:03 +0000
committer  zherczeg <zherczeg@2f5784b3-3f2a-0410-8824-cb99058d5e15>  2016-03-04 09:08:03 +0000
commit     573974bafc36f274d2d4d1800e424fdaec4da683 (patch)
tree       79601e1dd9b89d71be2bf7ea1e25837d6d9e03cf
parent     c7f48e2e4016eed7764962e99244b9ba699cc1dd (diff)
download   pcre-573974bafc36f274d2d4d1800e424fdaec4da683.tar.gz
JIT compiler update.
git-svn-id: svn://vcs.exim.org/pcre/code/trunk@1644 2f5784b3-3f2a-0410-8824-cb99058d5e15
-rw-r--r--  sljit/sljitConfigInternal.h      |  28
-rw-r--r--  sljit/sljitLir.c                 | 152
-rw-r--r--  sljit/sljitLir.h                 | 246
-rw-r--r--  sljit/sljitNativeARM_32.c        | 120
-rw-r--r--  sljit/sljitNativeARM_64.c        |  88
-rw-r--r--  sljit/sljitNativeARM_T2_32.c     | 108
-rw-r--r--  sljit/sljitNativeMIPS_common.c   | 116
-rw-r--r--  sljit/sljitNativePPC_common.c    | 102
-rw-r--r--  sljit/sljitNativeSPARC_common.c  |  84
-rw-r--r--  sljit/sljitNativeTILEGX_64.c     |  10
-rw-r--r--  sljit/sljitNativeX86_common.c    | 144
11 files changed, 606 insertions, 592 deletions
diff --git a/sljit/sljitConfigInternal.h b/sljit/sljitConfigInternal.h
index 8baa845..f2ef022 100644
--- a/sljit/sljitConfigInternal.h
+++ b/sljit/sljitConfigInternal.h
@@ -34,11 +34,11 @@
sljit_s8, sljit_u8 : signed and unsigned 8 bit integer type
sljit_s16, sljit_u16 : signed and unsigned 16 bit integer type
sljit_s32, sljit_u32 : signed and unsigned 32 bit integer type
- sljit_sw, sljit_uw : signed and unsigned machine word, enough to store a pointer
- sljit_p : unsgined pointer value (usually the same as sljit_uw, but
- some 64 bit ABIs may use 32 bit pointers)
- sljit_s : single precision floating point value
- sljit_d : double precision floating point value
+ sljit_sw, sljit_uw : signed and unsigned machine word, enough to store a pointer
+ sljit_p : unsgined pointer value (usually the same as sljit_uw, but
+ some 64 bit ABIs may use 32 bit pointers)
+ sljit_f32 : 32 bit single precision floating point value
+ sljit_f64 : 64 bit double precision floating point value
Macros for feature detection (boolean):
SLJIT_32BIT_ARCHITECTURE : 32 bit architecture
@@ -56,10 +56,10 @@
SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS : number of available floating point scratch registers
SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS : number of available floating point saved registers
SLJIT_WORD_SHIFT : the shift required to apply when accessing a sljit_sw/sljit_uw array by index
- SLJIT_DOUBLE_SHIFT : the shift required to apply when accessing
- a double precision floating point array by index
- SLJIT_SINGLE_SHIFT : the shift required to apply when accessing
- a single precision floating point array by index
+ SLJIT_F32_SHIFT : the shift required to apply when accessing
+ a single precision floating point array by index
+ SLJIT_F64_SHIFT : the shift required to apply when accessing
+ a double precision floating point array by index
SLJIT_LOCALS_OFFSET : local space starting offset (SLJIT_SP + SLJIT_LOCALS_OFFSET)
SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address
@@ -325,7 +325,7 @@
#endif /* !SLJIT_CACHE_FLUSH */
/******************************************************/
-/* Byte/half/int/word/single/double type definitions. */
+/* Integer and floating point type definitions. */
/******************************************************/
/* 8 bit byte type. */
@@ -372,15 +372,15 @@ typedef long int sljit_sw;
typedef sljit_uw sljit_p;
/* Floating point types. */
-typedef float sljit_s;
-typedef double sljit_d;
+typedef float sljit_f32;
+typedef double sljit_f64;
/* Shift for pointer sized data. */
#define SLJIT_POINTER_SHIFT SLJIT_WORD_SHIFT
/* Shift for double precision sized data. */
-#define SLJIT_DOUBLE_SHIFT 3
-#define SLJIT_SINGLE_SHIFT 2
+#define SLJIT_F32_SHIFT 2
+#define SLJIT_F64_SHIFT 3
#ifndef SLJIT_W
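For orientation, a minimal C sketch of how the renamed float typedefs and shift macros from the hunks above are meant to be used (the variable names are made up for illustration):

    sljit_f32 s = 1.0f;   /* was sljit_s: single precision */
    sljit_f64 d = 1.0;    /* was sljit_d: double precision */

    /* SLJIT_F32_SHIFT (2) and SLJIT_F64_SHIFT (3) are log2(sizeof(type)),
       so an element index becomes a byte offset with a shift: */
    sljit_sw byte_offset = (sljit_sw)10 << SLJIT_F64_SHIFT;  /* 10 * sizeof(sljit_f64) */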
diff --git a/sljit/sljitLir.c b/sljit/sljitLir.c
index cdabac8..ec1781e 100644
--- a/sljit/sljitLir.c
+++ b/sljit/sljitLir.c
@@ -341,9 +341,9 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo
&& (sizeof(sljit_sw) == 4 || sizeof(sljit_sw) == 8)
&& (sizeof(sljit_uw) == 4 || sizeof(sljit_uw) == 8),
invalid_integer_types);
- SLJIT_COMPILE_ASSERT(SLJIT_I32_OP == SLJIT_SINGLE_OP,
+ SLJIT_COMPILE_ASSERT(SLJIT_I32_OP == SLJIT_F32_OP,
int_op_and_single_op_must_be_the_same);
- SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_SINGLE_OP,
+ SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_F32_OP,
rewritable_jump_and_single_op_must_not_be_the_same);
/* Only the non-zero members must be set. */
@@ -654,8 +654,8 @@ static SLJIT_INLINE void set_const(struct sljit_const *const_, struct sljit_comp
break; \
case SLJIT_BREAKPOINT: \
case SLJIT_NOP: \
- case SLJIT_LUMUL: \
- case SLJIT_LSMUL: \
+ case SLJIT_LMUL_UW: \
+ case SLJIT_LMUL_SW: \
case SLJIT_MOV: \
case SLJIT_MOV_U32: \
case SLJIT_MOV_P: \
@@ -666,7 +666,7 @@ static SLJIT_INLINE void set_const(struct sljit_const *const_, struct sljit_comp
CHECK_ARGUMENT(!(op & (SLJIT_I32_OP | SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
break; \
default: \
- /* Only SLJIT_I32_OP or SLJIT_SINGLE_OP is allowed. */ \
+ /* Only SLJIT_I32_OP or SLJIT_F32_OP is allowed. */ \
CHECK_ARGUMENT(!(op & (SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
break; \
}
@@ -674,12 +674,12 @@ static SLJIT_INLINE void set_const(struct sljit_const *const_, struct sljit_comp
#define FUNCTION_CHECK_FOP() \
CHECK_ARGUMENT(!GET_FLAGS(op) || !(op & SLJIT_KEEP_FLAGS)); \
switch (GET_OPCODE(op)) { \
- case SLJIT_DCMP: \
+ case SLJIT_CMP_F64: \
CHECK_ARGUMENT(!(op & (SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
CHECK_ARGUMENT((op & (SLJIT_SET_E | SLJIT_SET_S))); \
break; \
default: \
- /* Only SLJIT_I32_OP or SLJIT_SINGLE_OP is allowed. */ \
+ /* Only SLJIT_I32_OP or SLJIT_F32_OP is allowed. */ \
CHECK_ARGUMENT(!(op & (SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
break; \
}
@@ -845,15 +845,15 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *comp
}
static const char* op0_names[] = {
- (char*)"breakpoint", (char*)"nop", (char*)"lumul", (char*)"lsmul",
- (char*)"udivmod", (char*)"sdivmod", (char*)"udivi", (char*)"sdivi"
+ (char*)"breakpoint", (char*)"nop", (char*)"lmul.uw", (char*)"lmul.sw",
+ (char*)"divmod.u", (char*)"divmod.s", (char*)"div.u", (char*)"div.s"
};
static const char* op1_names[] = {
- (char*)"mov", (char*)"mov_ub", (char*)"mov_sb", (char*)"mov_uh",
- (char*)"mov_sh", (char*)"mov_ui", (char*)"mov_si", (char*)"mov_p",
- (char*)"movu", (char*)"movu_ub", (char*)"movu_sb", (char*)"movu_uh",
- (char*)"movu_sh", (char*)"movu_ui", (char*)"movu_si", (char*)"movu_p",
+ (char*)"", (char*)".u8", (char*)".s8", (char*)".u16",
+ (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p",
+ (char*)"", (char*)".u8", (char*)".s8", (char*)".u16",
+ (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p",
(char*)"not", (char*)"neg", (char*)"clz",
};
@@ -873,9 +873,9 @@ static const char* fop2_names[] = {
(char*)"add", (char*)"sub", (char*)"mul", (char*)"div"
};
-#define JUMP_PREFIX(type) \
- ((type & 0xff) <= SLJIT_MUL_NOT_OVERFLOW ? ((type & SLJIT_I32_OP) ? "i_" : "") \
- : ((type & 0xff) <= SLJIT_D_ORDERED ? ((type & SLJIT_SINGLE_OP) ? "s_" : "d_") : ""))
+#define JUMP_POSTFIX(type) \
+ ((type & 0xff) <= SLJIT_MUL_NOT_OVERFLOW ? ((type & SLJIT_I32_OP) ? "32" : "") \
+ : ((type & 0xff) <= SLJIT_ORDERED_F64 ? ((type & SLJIT_F32_OP) ? ".f32" : ".f64") : ""))
static char* jump_names[] = {
(char*)"equal", (char*)"not_equal",
@@ -993,7 +993,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_return(struct sljit_compi
if (op == SLJIT_UNUSED)
fprintf(compiler->verbose, " return\n");
else {
- fprintf(compiler->verbose, " return.%s ", op1_names[op - SLJIT_OP1_BASE]);
+ fprintf(compiler->verbose, " return%s ", op1_names[op - SLJIT_OP1_BASE]);
sljit_verbose_param(compiler, src, srcw);
fprintf(compiler->verbose, "\n");
}
@@ -1035,13 +1035,19 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fast_return(struct sljit_
static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- CHECK_ARGUMENT((op >= SLJIT_BREAKPOINT && op <= SLJIT_LSMUL)
- || ((op & ~SLJIT_I32_OP) >= SLJIT_UDIVMOD && (op & ~SLJIT_I32_OP) <= SLJIT_SDIVI));
- CHECK_ARGUMENT(op < SLJIT_LUMUL || compiler->scratches >= 2);
+ CHECK_ARGUMENT((op >= SLJIT_BREAKPOINT && op <= SLJIT_LMUL_SW)
+ || ((op & ~SLJIT_I32_OP) >= SLJIT_DIVMOD_UW && (op & ~SLJIT_I32_OP) <= SLJIT_DIV_SW));
+ CHECK_ARGUMENT(op < SLJIT_LMUL_UW || compiler->scratches >= 2);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose))
- fprintf(compiler->verbose, " %s%s\n", !(op & SLJIT_I32_OP) ? "" : "i", op0_names[GET_OPCODE(op) - SLJIT_OP0_BASE]);
+ {
+ fprintf(compiler->verbose, " %s", op0_names[GET_OPCODE(op) - SLJIT_OP0_BASE]);
+ if (GET_OPCODE(op) >= SLJIT_DIVMOD_UW) {
+ fprintf(compiler->verbose, (op & SLJIT_I32_OP) ? "32" : "w");
+ }
+ fprintf(compiler->verbose, "\n");
+ }
#endif
CHECK_RETURN_OK;
}
@@ -1064,9 +1070,18 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op1(struct sljit_compiler
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", !(op & SLJIT_I32_OP) ? "" : "i", op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE],
- !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_U) ? "" : ".u", !(op & SLJIT_SET_S) ? "" : ".s",
- !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
+ if (GET_OPCODE(op) <= SLJIT_MOVU_P)
+ {
+ fprintf(compiler->verbose, " mov%s%s%s ", (GET_OPCODE(op) >= SLJIT_MOVU) ? "u" : "",
+ !(op & SLJIT_I32_OP) ? "" : "32", (op != SLJIT_MOV32 && op != SLJIT_MOVU32) ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : "");
+ }
+ else
+ {
+ fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE], !(op & SLJIT_I32_OP) ? "" : "32",
+ !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_U) ? "" : ".u", !(op & SLJIT_SET_S) ? "" : ".s",
+ !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
+ }
+
sljit_verbose_param(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src, srcw);
@@ -1095,7 +1110,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", !(op & SLJIT_I32_OP) ? "" : "i", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE],
+ fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], !(op & SLJIT_I32_OP) ? "" : "32",
!(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_U) ? "" : ".u", !(op & SLJIT_SET_S) ? "" : ".s",
!(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
sljit_verbose_param(compiler, dst, dstw);
@@ -1170,19 +1185,19 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1(struct sljit_compile
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_is_fpu_available());
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_DMOV && GET_OPCODE(op) <= SLJIT_DABS);
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV_F64 && GET_OPCODE(op) <= SLJIT_ABS_F64);
FUNCTION_CHECK_FOP();
FUNCTION_FCHECK(src, srcw);
FUNCTION_FCHECK(dst, dstw);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
- fprintf(compiler->verbose, " %s%s ", fop1_names[SLJIT_CONVD_FROMS - SLJIT_FOP1_BASE],
- (op & SLJIT_SINGLE_OP) ? "s.fromd" : "d.froms");
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+ fprintf(compiler->verbose, " %s%s ", fop1_names[SLJIT_CONV_F64_FROM_F32 - SLJIT_FOP1_BASE],
+ (op & SLJIT_F32_OP) ? ".f32.from.f64" : ".f64.from.f32");
else
- fprintf(compiler->verbose, " %s%s ", (op & SLJIT_SINGLE_OP) ? "s" : "d",
- fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE]);
+ fprintf(compiler->verbose, " %s%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
+ (op & SLJIT_F32_OP) ? ".f32" : ".f64");
sljit_verbose_fparam(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
@@ -1204,14 +1219,14 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_com
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_is_fpu_available());
- CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_DCMP);
+ CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_CMP_F64);
FUNCTION_CHECK_FOP();
FUNCTION_FCHECK(src1, src1w);
FUNCTION_FCHECK(src2, src2w);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s%s%s ", (op & SLJIT_SINGLE_OP) ? "s" : "d", fop1_names[SLJIT_DCMP - SLJIT_FOP1_BASE],
+ fprintf(compiler->verbose, " %s%s%s%s ", fop1_names[SLJIT_CMP_F64 - SLJIT_FOP1_BASE], (op & SLJIT_F32_OP) ? ".f32" : ".f64",
(op & SLJIT_SET_E) ? ".e" : "", (op & SLJIT_SET_S) ? ".s" : "");
sljit_verbose_fparam(compiler, src1, src1w);
fprintf(compiler->verbose, ", ");
@@ -1222,7 +1237,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_com
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -1233,7 +1248,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convw_fromd(struct s
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_is_fpu_available());
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONVW_FROMD && GET_OPCODE(op) <= SLJIT_CONVI_FROMD);
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CONV_S32_FROM_F64);
FUNCTION_CHECK_FOP();
FUNCTION_FCHECK(src, srcw);
FUNCTION_CHECK_DST(dst, dstw);
@@ -1241,8 +1256,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convw_fromd(struct s
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
fprintf(compiler->verbose, " %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
- (GET_OPCODE(op) == SLJIT_CONVI_FROMD) ? "i" : "w",
- (op & SLJIT_SINGLE_OP) ? "s" : "d");
+ (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? ".s32" : ".sw",
+ (op & SLJIT_F32_OP) ? ".f32" : ".f64");
sljit_verbose_param(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_fparam(compiler, src, srcw);
@@ -1252,7 +1267,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convw_fromd(struct s
CHECK_RETURN_OK;
}
-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -1263,7 +1278,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convd_fromw(struct s
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_is_fpu_available());
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONVD_FROMW && GET_OPCODE(op) <= SLJIT_CONVD_FROMI);
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_F64_FROM_SW && GET_OPCODE(op) <= SLJIT_CONV_F64_FROM_S32);
FUNCTION_CHECK_FOP();
FUNCTION_CHECK_SRC(src, srcw);
FUNCTION_FCHECK(dst, dstw);
@@ -1271,8 +1286,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convd_fromw(struct s
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
fprintf(compiler->verbose, " %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
- (op & SLJIT_SINGLE_OP) ? "s" : "d",
- (GET_OPCODE(op) == SLJIT_CONVD_FROMI) ? "i" : "w");
+ (op & SLJIT_F32_OP) ? ".f32" : ".f64",
+ (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? ".s32" : ".sw");
sljit_verbose_fparam(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src, srcw);
@@ -1289,7 +1304,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop2(struct sljit_compile
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_is_fpu_available());
- CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_DADD && GET_OPCODE(op) <= SLJIT_DDIV);
+ CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD_F64 && GET_OPCODE(op) <= SLJIT_DIV_F64);
FUNCTION_CHECK_FOP();
FUNCTION_FCHECK(src1, src1w);
FUNCTION_FCHECK(src2, src2w);
@@ -1297,7 +1312,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop2(struct sljit_compile
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " %s%s ", (op & SLJIT_SINGLE_OP) ? "s" : "d", fop2_names[GET_OPCODE(op) - SLJIT_FOP2_BASE]);
+ fprintf(compiler->verbose, " %s%s ", fop2_names[GET_OPCODE(op) - SLJIT_FOP2_BASE], (op & SLJIT_F32_OP) ? ".f32" : ".f64");
sljit_verbose_fparam(compiler, dst, dstw);
fprintf(compiler->verbose, ", ");
sljit_verbose_fparam(compiler, src1, src1w);
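Purely as an illustration of how the verbose mnemonics change shape with this patch (derived from fop2_names and the format strings above; the operand text printed by sljit_verbose_fparam is abbreviated to placeholders):

    before:  sadd <dst>, <src1>, <src2>        /* "s"/"d" prefix + opcode name */
    after:   add.f32 <dst>, <src1>, <src2>     /* opcode name + ".f32"/".f64" suffix */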
@@ -1335,8 +1350,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compile
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose))
- fprintf(compiler->verbose, " jump%s.%s%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
- JUMP_PREFIX(type), jump_names[type & 0xff]);
+ fprintf(compiler->verbose, " jump%s %s%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+ jump_names[type & 0xff], JUMP_POSTFIX(type));
#endif
CHECK_RETURN_OK;
}
@@ -1353,8 +1368,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmp(struct sljit_compiler
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " cmp%s.%s%s ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
- (type & SLJIT_I32_OP) ? "i_" : "", jump_names[type & 0xff]);
+ fprintf(compiler->verbose, " cmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+ jump_names[type & 0xff], (type & SLJIT_I32_OP) ? "32" : "");
sljit_verbose_param(compiler, src1, src1w);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src2, src2w);
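A hedged sketch of the renamed 32 bit compare-and-jump form (SLJIT_LESS32 replaces SLJIT_I_LESS); the target label handling is only indicated in a comment:

    /* Branch when the lower 32 bits of R0 are below those of R1 (unsigned). */
    struct sljit_jump *less = sljit_emit_cmp(compiler, SLJIT_LESS32,
        SLJIT_R0, 0, SLJIT_R1, 0);
    /* later: sljit_set_label(less, some_label); */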
@@ -1370,15 +1385,15 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fcmp(struct sljit_compile
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_is_fpu_available());
- CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_SINGLE_OP)));
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_D_EQUAL && (type & 0xff) <= SLJIT_D_ORDERED);
+ CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_F32_OP)));
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL_F64 && (type & 0xff) <= SLJIT_ORDERED_F64);
FUNCTION_FCHECK(src1, src1w);
FUNCTION_FCHECK(src2, src2w);
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " fcmp%s.%s%s ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
- (type & SLJIT_SINGLE_OP) ? "s_" : "d_", jump_names[type & 0xff]);
+ fprintf(compiler->verbose, " fcmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+ jump_names[type & 0xff], (type & SLJIT_F32_OP) ? ".f32" : ".f64");
sljit_verbose_fparam(compiler, src1, src1w);
fprintf(compiler->verbose, ", ");
sljit_verbose_fparam(compiler, src2, src2w);
@@ -1417,7 +1432,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_flags(struct sljit_com
{
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP)));
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_D_ORDERED);
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64);
CHECK_ARGUMENT(op == SLJIT_MOV || GET_OPCODE(op) == SLJIT_MOV_U32 || GET_OPCODE(op) == SLJIT_MOV_S32
|| (GET_OPCODE(op) >= SLJIT_AND && GET_OPCODE(op) <= SLJIT_XOR));
CHECK_ARGUMENT((op & (SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C)) == 0);
@@ -1431,15 +1446,16 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_flags(struct sljit_com
#endif
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
- fprintf(compiler->verbose, " flags.%s%s%s%s ", !(op & SLJIT_I32_OP) ? "" : "i",
- GET_OPCODE(op) >= SLJIT_OP2_BASE ? op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE] : op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE],
- !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
+ fprintf(compiler->verbose, " flags %s%s%s%s, ",
+ !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k",
+ GET_OPCODE(op) < SLJIT_OP2_BASE ? "mov" : op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE],
+ GET_OPCODE(op) < SLJIT_OP2_BASE ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : ((op & SLJIT_I32_OP) ? "32" : ""));
sljit_verbose_param(compiler, dst, dstw);
if (src != SLJIT_UNUSED) {
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src, srcw);
}
- fprintf(compiler->verbose, ", %s%s\n", JUMP_PREFIX(type), jump_names[type & 0xff]);
+ fprintf(compiler->verbose, ", %s%s\n", jump_names[type & 0xff], JUMP_POSTFIX(type));
}
#endif
CHECK_RETURN_OK;
@@ -1482,25 +1498,25 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_const(struct sljit_compil
#endif /* SLJIT_ARGUMENT_CHECKS || SLJIT_VERBOSE */
#define SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw) \
- SLJIT_COMPILE_ASSERT(!(SLJIT_CONVW_FROMD & 0x1) && !(SLJIT_CONVD_FROMW & 0x1), \
+ SLJIT_COMPILE_ASSERT(!(SLJIT_CONV_SW_FROM_F64 & 0x1) && !(SLJIT_CONV_F64_FROM_SW & 0x1), \
invalid_float_opcodes); \
- if (GET_OPCODE(op) >= SLJIT_CONVW_FROMD && GET_OPCODE(op) <= SLJIT_DCMP) { \
- if (GET_OPCODE(op) == SLJIT_DCMP) { \
+ if (GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CMP_F64) { \
+ if (GET_OPCODE(op) == SLJIT_CMP_F64) { \
CHECK(check_sljit_emit_fop1_cmp(compiler, op, dst, dstw, src, srcw)); \
ADJUST_LOCAL_OFFSET(dst, dstw); \
ADJUST_LOCAL_OFFSET(src, srcw); \
return sljit_emit_fop1_cmp(compiler, op, dst, dstw, src, srcw); \
} \
- if ((GET_OPCODE(op) | 0x1) == SLJIT_CONVI_FROMD) { \
- CHECK(check_sljit_emit_fop1_convw_fromd(compiler, op, dst, dstw, src, srcw)); \
+ if ((GET_OPCODE(op) | 0x1) == SLJIT_CONV_S32_FROM_F64) { \
+ CHECK(check_sljit_emit_fop1_conv_sw_from_f64(compiler, op, dst, dstw, src, srcw)); \
ADJUST_LOCAL_OFFSET(dst, dstw); \
ADJUST_LOCAL_OFFSET(src, srcw); \
- return sljit_emit_fop1_convw_fromd(compiler, op, dst, dstw, src, srcw); \
+ return sljit_emit_fop1_conv_sw_from_f64(compiler, op, dst, dstw, src, srcw); \
} \
- CHECK(check_sljit_emit_fop1_convd_fromw(compiler, op, dst, dstw, src, srcw)); \
+ CHECK(check_sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw)); \
ADJUST_LOCAL_OFFSET(dst, dstw); \
ADJUST_LOCAL_OFFSET(src, srcw); \
- return sljit_emit_fop1_convd_fromw(compiler, op, dst, dstw, src, srcw); \
+ return sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw); \
} \
CHECK(check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw)); \
ADJUST_LOCAL_OFFSET(dst, dstw); \
@@ -1668,15 +1684,15 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compile
CHECK_PTR(check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w));
condition = type & 0xff;
- flags = (condition <= SLJIT_D_NOT_EQUAL) ? SLJIT_SET_E : SLJIT_SET_S;
- if (type & SLJIT_SINGLE_OP)
- flags |= SLJIT_SINGLE_OP;
+ flags = (condition <= SLJIT_NOT_EQUAL_F64) ? SLJIT_SET_E : SLJIT_SET_S;
+ if (type & SLJIT_F32_OP)
+ flags |= SLJIT_F32_OP;
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif
- sljit_emit_fop1(compiler, SLJIT_DCMP | flags, src1, src1w, src2, src2w);
+ sljit_emit_fop1(compiler, SLJIT_CMP_F64 | flags, src1, src1w, src2, src2w);
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
diff --git a/sljit/sljitLir.h b/sljit/sljitLir.h
index 77cf6c2..df69b86 100644
--- a/sljit/sljitLir.h
+++ b/sljit/sljitLir.h
@@ -226,7 +226,7 @@ of sljitConfigInternal.h */
/* Floating point registers */
/* --------------------------------------------------------------------- */
-/* Each floating point register can store a double or single precision
+/* Each floating point register can store a 32 or a 64 bit precision
value. The FR and FS register sets are overlap in the same way as R
and S register sets. See above. */
@@ -624,31 +624,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
#define SLJIT_MEM2(r1, r2) (SLJIT_MEM | (r1) | ((r2) << 8))
#define SLJIT_IMM 0x40
-/* Set 32 bit operation mode (I) on 64 bit CPUs. The flag is totally ignored on
- 32 bit CPUs. If this flag is set for an arithmetic operation, it uses only the
- lower 32 bit of the input register(s), and set the CPU status flags according
- to the 32 bit result. The higher 32 bits are undefined for both the input and
- output. However, the CPU might not ignore those higher 32 bits, like MIPS, which
- expects it to be the sign extension of the lower 32 bit. All 32 bit operations
- are undefined, if this condition is not fulfilled. Therefore, when SLJIT_I32_OP
- is specified, all register arguments must be the result of other operations with
- the same SLJIT_I32_OP flag. In other words, although a register can hold either
- a 64 or 32 bit value, these values cannot be mixed. The only exceptions are
- SLJIT_IMOV and SLJIT_IMOVU (SLJIT_MOV_S32/SLJIT_MOVU_S32 with SLJIT_I32_OP flag)
- which can convert any source argument to SLJIT_I32_OP compatible result. This
- conversion might be unnecessary on some CPUs like x86-64, since the upper 32
- bit is always ignored. In this case SLJIT is clever enough to not generate any
- instructions if the source and destination operands are the same registers.
- Affects sljit_emit_op0, sljit_emit_op1 and sljit_emit_op2. */
+/* Set 32 bit operation mode (I) on 64 bit CPUs. This flag is ignored on 32
+ bit CPUs. When this flag is set for an arithmetic operation, only the
+ lower 32 bit of the input register(s) are used, and the CPU status flags
+ are set according to the 32 bit result. Although the higher 32 bit of
+ the input and the result registers are not defined by SLJIT, it might be
+ defined by the CPU architecture (e.g. MIPS). To satisfy these requirements
+ all source registers must be computed by operations where this flag is
+ also set. In other words 32 and 64 bit arithmetic operations cannot be
+ mixed. The only exception is SLJIT_IMOV and SLJIT_IMOVU whose source
+ register can hold any 32 or 64 bit value. This source register is
+ converted to a 32 bit compatible format. SLJIT does not generate any
+ instructions on certain CPUs (e.g. on x86 and ARM) if the source and
+ destination operands are the same registers. Affects sljit_emit_op0,
+ sljit_emit_op1 and sljit_emit_op2. */
#define SLJIT_I32_OP 0x100
-/* Single precision mode (SP). This flag is similar to SLJIT_I32_OP, just
+/* F32 precision mode (SP). This flag is similar to SLJIT_I32_OP, just
it applies to floating point registers (it is even the same bit). When
- this flag is passed, the CPU performs single precision floating point
- operations. Similar to SLJIT_I32_OP, all register arguments must be the
- result of other floating point operations with this flag. Affects
+ this flag is passed, the CPU performs 32 bit floating point operations.
+ Similar to SLJIT_I32_OP, all register arguments must be computed by
+ floating point operations where this flag is also set. Affects
sljit_emit_fop1, sljit_emit_fop2 and sljit_emit_fcmp. */
-#define SLJIT_SINGLE_OP 0x100
+#define SLJIT_F32_OP 0x100
/* Common CPU status flags for all architectures (x86, ARM, PPC)
- carry flag
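A short sketch of the mixing rule described above, assuming a compiler that has already been prepared with sljit_emit_enter: every source of a 32 bit operation must itself be produced by a 32 bit operation, and SLJIT_MOV32 (the renamed SLJIT_IMOV) is the sanctioned way to convert a value.

    /* Make both inputs 32 bit compatible first, then add in 32 bit mode. */
    sljit_emit_op1(compiler, SLJIT_MOV32, SLJIT_R0, 0, SLJIT_R0, 0);
    sljit_emit_op1(compiler, SLJIT_MOV32, SLJIT_R1, 0, SLJIT_R1, 0);
    sljit_emit_op2(compiler, SLJIT_ADD32, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_R1, 0);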
@@ -697,41 +695,39 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler
/* Flags: - (may destroy flags)
Unsigned multiplication of SLJIT_R0 and SLJIT_R1.
Result is placed into SLJIT_R1:SLJIT_R0 (high:low) word */
-#define SLJIT_LUMUL (SLJIT_OP0_BASE + 2)
+#define SLJIT_LMUL_UW (SLJIT_OP0_BASE + 2)
/* Flags: - (may destroy flags)
Signed multiplication of SLJIT_R0 and SLJIT_R1.
Result is placed into SLJIT_R1:SLJIT_R0 (high:low) word */
-#define SLJIT_LSMUL (SLJIT_OP0_BASE + 3)
+#define SLJIT_LMUL_SW (SLJIT_OP0_BASE + 3)
/* Flags: I - (may destroy flags)
Unsigned divide of the value in SLJIT_R0 by the value in SLJIT_R1.
The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
Note: if SLJIT_R1 is 0, the behaviour is undefined. */
-#define SLJIT_UDIVMOD (SLJIT_OP0_BASE + 4)
-#define SLJIT_IUDIVMOD (SLJIT_UDIVMOD | SLJIT_I32_OP)
+#define SLJIT_DIVMOD_UW (SLJIT_OP0_BASE + 4)
+#define SLJIT_DIVMOD_U32 (SLJIT_DIVMOD_UW | SLJIT_I32_OP)
/* Flags: I - (may destroy flags)
Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
Note: if SLJIT_R1 is 0, the behaviour is undefined.
Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
the behaviour is undefined. */
-#define SLJIT_SDIVMOD (SLJIT_OP0_BASE + 5)
-#define SLJIT_ISDIVMOD (SLJIT_SDIVMOD | SLJIT_I32_OP)
+#define SLJIT_DIVMOD_SW (SLJIT_OP0_BASE + 5)
+#define SLJIT_DIVMOD_S32 (SLJIT_DIVMOD_SW | SLJIT_I32_OP)
/* Flags: I - (may destroy flags)
Unsigned divide of the value in SLJIT_R0 by the value in SLJIT_R1.
The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
- Note: if SLJIT_R1 is 0, the behaviour is undefined.
- Note: SLJIT_SDIV is single precision divide. */
-#define SLJIT_UDIVI (SLJIT_OP0_BASE + 6)
-#define SLJIT_IUDIVI (SLJIT_UDIVI | SLJIT_I32_OP)
+ Note: if SLJIT_R1 is 0, the behaviour is undefined. */
+#define SLJIT_DIV_UW (SLJIT_OP0_BASE + 6)
+#define SLJIT_DIV_U32 (SLJIT_DIV_UW | SLJIT_I32_OP)
/* Flags: I - (may destroy flags)
Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
Note: if SLJIT_R1 is 0, the behaviour is undefined.
Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
- the behaviour is undefined.
- Note: SLJIT_SDIV is single precision divide. */
-#define SLJIT_SDIVI (SLJIT_OP0_BASE + 7)
-#define SLJIT_ISDIVI (SLJIT_SDIVI | SLJIT_I32_OP)
+ the behaviour is undefined. */
+#define SLJIT_DIV_SW (SLJIT_OP0_BASE + 7)
+#define SLJIT_DIV_S32 (SLJIT_DIV_SW | SLJIT_I32_OP)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op);
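A minimal usage sketch for the renamed op0 codes; SLJIT_R0 and SLJIT_R1 are assumed to already hold the operands:

    /* Double-width unsigned multiply: high:low result in SLJIT_R1:SLJIT_R0
       (was SLJIT_LUMUL). */
    sljit_emit_op0(compiler, SLJIT_LMUL_UW);

    /* Unsigned divide with remainder: quotient -> SLJIT_R0,
       remainder -> SLJIT_R1 (was SLJIT_UDIVMOD). */
    sljit_emit_op0(compiler, SLJIT_DIVMOD_UW);

    /* 32 bit signed divide, quotient only; SLJIT_R1 is preserved
       (was SLJIT_ISDIVI). */
    sljit_emit_op0(compiler, SLJIT_DIV_S32);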
@@ -753,62 +749,62 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#define SLJIT_MOV (SLJIT_OP1_BASE + 0)
/* Flags: I - (never set any flags) */
#define SLJIT_MOV_U8 (SLJIT_OP1_BASE + 1)
-#define SLJIT_IMOV_U8 (SLJIT_MOV_U8 | SLJIT_I32_OP)
+#define SLJIT_MOV32_U8 (SLJIT_MOV_U8 | SLJIT_I32_OP)
/* Flags: I - (never set any flags) */
#define SLJIT_MOV_S8 (SLJIT_OP1_BASE + 2)
-#define SLJIT_IMOV_S8 (SLJIT_MOV_S8 | SLJIT_I32_OP)
+#define SLJIT_MOV32_S8 (SLJIT_MOV_S8 | SLJIT_I32_OP)
/* Flags: I - (never set any flags) */
#define SLJIT_MOV_U16 (SLJIT_OP1_BASE + 3)
-#define SLJIT_IMOV_U16 (SLJIT_MOV_U16 | SLJIT_I32_OP)
+#define SLJIT_MOV32_U16 (SLJIT_MOV_U16 | SLJIT_I32_OP)
/* Flags: I - (never set any flags) */
#define SLJIT_MOV_S16 (SLJIT_OP1_BASE + 4)
-#define SLJIT_IMOV_S16 (SLJIT_MOV_S16 | SLJIT_I32_OP)
+#define SLJIT_MOV32_S16 (SLJIT_MOV_S16 | SLJIT_I32_OP)
/* Flags: I - (never set any flags)
- Note: see SLJIT_I32_OP for further details. */
+ Note: no SLJIT_MOV32_U32 form, since it is the same as SLJIT_MOV32 */
#define SLJIT_MOV_U32 (SLJIT_OP1_BASE + 5)
-/* No SLJIT_I32_OP form, since it is the same as SLJIT_IMOV. */
/* Flags: I - (never set any flags)
- Note: see SLJIT_I32_OP for further details. */
+ Note: no SLJIT_MOV32_S32 form, since it is the same as SLJIT_MOV32 */
#define SLJIT_MOV_S32 (SLJIT_OP1_BASE + 6)
-#define SLJIT_IMOV (SLJIT_MOV_S32 | SLJIT_I32_OP)
+/* Flags: I - (never set any flags) */
+#define SLJIT_MOV32 (SLJIT_MOV_S32 | SLJIT_I32_OP)
/* Flags: - (never set any flags) */
#define SLJIT_MOV_P (SLJIT_OP1_BASE + 7)
/* Flags: - (never set any flags) */
#define SLJIT_MOVU (SLJIT_OP1_BASE + 8)
/* Flags: I - (never set any flags) */
#define SLJIT_MOVU_U8 (SLJIT_OP1_BASE + 9)
-#define SLJIT_IMOVU_U8 (SLJIT_MOVU_U8 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_U8 (SLJIT_MOVU_U8 | SLJIT_I32_OP)
/* Flags: I - (never set any flags) */
#define SLJIT_MOVU_S8 (SLJIT_OP1_BASE + 10)
-#define SLJIT_IMOVU_S8 (SLJIT_MOVU_S8 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_S8 (SLJIT_MOVU_S8 | SLJIT_I32_OP)
/* Flags: I - (never set any flags) */
#define SLJIT_MOVU_U16 (SLJIT_OP1_BASE + 11)
-#define SLJIT_IMOVU_U16 (SLJIT_MOVU_U16 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_U16 (SLJIT_MOVU_U16 | SLJIT_I32_OP)
/* Flags: I - (never set any flags) */
#define SLJIT_MOVU_S16 (SLJIT_OP1_BASE + 12)
-#define SLJIT_IMOVU_S16 (SLJIT_MOVU_S16 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_S16 (SLJIT_MOVU_S16 | SLJIT_I32_OP)
/* Flags: I - (never set any flags)
- Note: see SLJIT_I32_OP for further details. */
+ Note: no SLJIT_MOVU32_U32 form, since it is the same as SLJIT_MOVU32 */
#define SLJIT_MOVU_U32 (SLJIT_OP1_BASE + 13)
-/* No SLJIT_I32_OP form, since it is the same as SLJIT_IMOVU. */
/* Flags: I - (never set any flags)
- Note: see SLJIT_I32_OP for further details. */
+ Note: no SLJIT_MOVU32_S32 form, since it is the same as SLJIT_MOVU32 */
#define SLJIT_MOVU_S32 (SLJIT_OP1_BASE + 14)
-#define SLJIT_IMOVU (SLJIT_MOVU_S32 | SLJIT_I32_OP)
+/* Flags: I - (never set any flags) */
+#define SLJIT_MOVU32 (SLJIT_MOVU_S32 | SLJIT_I32_OP)
/* Flags: - (never set any flags) */
#define SLJIT_MOVU_P (SLJIT_OP1_BASE + 15)
/* Flags: I | E | K */
#define SLJIT_NOT (SLJIT_OP1_BASE + 16)
-#define SLJIT_INOT (SLJIT_NOT | SLJIT_I32_OP)
+#define SLJIT_NOT32 (SLJIT_NOT | SLJIT_I32_OP)
/* Flags: I | E | O | K */
#define SLJIT_NEG (SLJIT_OP1_BASE + 17)
-#define SLJIT_INEG (SLJIT_NEG | SLJIT_I32_OP)
+#define SLJIT_NEG32 (SLJIT_NEG | SLJIT_I32_OP)
/* Count leading zeroes
Flags: I | E | K
Important note! Sparc 32 does not support K flag, since
the required popc instruction is introduced only in sparc 64. */
#define SLJIT_CLZ (SLJIT_OP1_BASE + 18)
-#define SLJIT_ICLZ (SLJIT_CLZ | SLJIT_I32_OP)
+#define SLJIT_CLZ32 (SLJIT_CLZ | SLJIT_I32_OP)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
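A hedged sketch of sljit_emit_op1 with the renamed 32 bit forms; SLJIT_S0 is assumed to hold a valid base pointer:

    /* Zero-extend a byte from [S0 + 0] into R0 as a 32 bit value
       (SLJIT_MOV32_U8 was SLJIT_IMOV_U8). */
    sljit_emit_op1(compiler, SLJIT_MOV32_U8, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_S0), 0);

    /* Count leading zeroes of that 32 bit value (SLJIT_CLZ32 was SLJIT_ICLZ). */
    sljit_emit_op1(compiler, SLJIT_CLZ32, SLJIT_R0, 0, SLJIT_R0, 0);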
@@ -819,50 +815,50 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
/* Flags: I | E | O | C | K */
#define SLJIT_ADD (SLJIT_OP2_BASE + 0)
-#define SLJIT_IADD (SLJIT_ADD | SLJIT_I32_OP)
+#define SLJIT_ADD32 (SLJIT_ADD | SLJIT_I32_OP)
/* Flags: I | C | K */
#define SLJIT_ADDC (SLJIT_OP2_BASE + 1)
-#define SLJIT_IADDC (SLJIT_ADDC | SLJIT_I32_OP)
+#define SLJIT_ADDC32 (SLJIT_ADDC | SLJIT_I32_OP)
/* Flags: I | E | U | S | O | C | K */
#define SLJIT_SUB (SLJIT_OP2_BASE + 2)
-#define SLJIT_ISUB (SLJIT_SUB | SLJIT_I32_OP)
+#define SLJIT_SUB32 (SLJIT_SUB | SLJIT_I32_OP)
/* Flags: I | C | K */
#define SLJIT_SUBC (SLJIT_OP2_BASE + 3)
-#define SLJIT_ISUBC (SLJIT_SUBC | SLJIT_I32_OP)
+#define SLJIT_SUBC32 (SLJIT_SUBC | SLJIT_I32_OP)
/* Note: integer mul
Flags: I | O (see SLJIT_C_MUL_*) | K */
#define SLJIT_MUL (SLJIT_OP2_BASE + 4)
-#define SLJIT_IMUL (SLJIT_MUL | SLJIT_I32_OP)
+#define SLJIT_MUL32 (SLJIT_MUL | SLJIT_I32_OP)
/* Flags: I | E | K */
#define SLJIT_AND (SLJIT_OP2_BASE + 5)
-#define SLJIT_IAND (SLJIT_AND | SLJIT_I32_OP)
+#define SLJIT_AND32 (SLJIT_AND | SLJIT_I32_OP)
/* Flags: I | E | K */
#define SLJIT_OR (SLJIT_OP2_BASE + 6)
-#define SLJIT_IOR (SLJIT_OR | SLJIT_I32_OP)
+#define SLJIT_OR32 (SLJIT_OR | SLJIT_I32_OP)
/* Flags: I | E | K */
#define SLJIT_XOR (SLJIT_OP2_BASE + 7)
-#define SLJIT_IXOR (SLJIT_XOR | SLJIT_I32_OP)
+#define SLJIT_XOR32 (SLJIT_XOR | SLJIT_I32_OP)
/* Flags: I | E | K
Let bit_length be the length of the shift operation: 32 or 64.
If src2 is immediate, src2w is masked by (bit_length - 1).
Otherwise, if the content of src2 is outside the range from 0
to bit_length - 1, the result is undefined. */
#define SLJIT_SHL (SLJIT_OP2_BASE + 8)
-#define SLJIT_ISHL (SLJIT_SHL | SLJIT_I32_OP)
+#define SLJIT_SHL32 (SLJIT_SHL | SLJIT_I32_OP)
/* Flags: I | E | K
Let bit_length be the length of the shift operation: 32 or 64.
If src2 is immediate, src2w is masked by (bit_length - 1).
Otherwise, if the content of src2 is outside the range from 0
to bit_length - 1, the result is undefined. */
#define SLJIT_LSHR (SLJIT_OP2_BASE + 9)
-#define SLJIT_ILSHR (SLJIT_LSHR | SLJIT_I32_OP)
+#define SLJIT_LSHR32 (SLJIT_LSHR | SLJIT_I32_OP)
/* Flags: I | E | K
Let bit_length be the length of the shift operation: 32 or 64.
If src2 is immediate, src2w is masked by (bit_length - 1).
Otherwise, if the content of src2 is outside the range from 0
to bit_length - 1, the result is undefined. */
#define SLJIT_ASHR (SLJIT_OP2_BASE + 10)
-#define SLJIT_IASHR (SLJIT_ASHR | SLJIT_I32_OP)
+#define SLJIT_ASHR32 (SLJIT_ASHR | SLJIT_I32_OP)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
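A hedged sketch of the renamed op2 forms, illustrating the shift-amount masking noted above:

    /* 32 bit shift left; an immediate src2 is masked by (bit_length - 1),
       i.e. by 31 here (SLJIT_SHL32 was SLJIT_ISHL). */
    sljit_emit_op2(compiler, SLJIT_SHL32, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 3);

    /* 32 bit subtract that only sets the E (equal/zero) flag
       (SLJIT_SUB32 was SLJIT_ISUB). */
    sljit_emit_op2(compiler, SLJIT_SUB32 | SLJIT_SET_E, SLJIT_R0, 0,
        SLJIT_R0, 0, SLJIT_R1, 0);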
@@ -877,38 +873,38 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void);
#define SLJIT_FOP1_BASE 128
/* Flags: SP - (never set any flags) */
-#define SLJIT_DMOV (SLJIT_FOP1_BASE + 0)
-#define SLJIT_SMOV (SLJIT_DMOV | SLJIT_SINGLE_OP)
+#define SLJIT_MOV_F64 (SLJIT_FOP1_BASE + 0)
+#define SLJIT_MOV_F32 (SLJIT_MOV_F64 | SLJIT_F32_OP)
/* Convert opcodes: CONV[DST_TYPE].FROM[SRC_TYPE]
SRC/DST TYPE can be: D - double, S - single, W - signed word, I - signed int
Rounding mode when the destination is W or I: round towards zero. */
/* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMS (SLJIT_FOP1_BASE + 1)
-#define SLJIT_CONVS_FROMD (SLJIT_CONVD_FROMS | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_F32 (SLJIT_FOP1_BASE + 1)
+#define SLJIT_CONV_F32_FROM_F64 (SLJIT_CONV_F64_FROM_F32 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_CONVW_FROMD (SLJIT_FOP1_BASE + 2)
-#define SLJIT_CONVW_FROMS (SLJIT_CONVW_FROMD | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_SW_FROM_F64 (SLJIT_FOP1_BASE + 2)
+#define SLJIT_CONV_SW_FROM_F32 (SLJIT_CONV_SW_FROM_F64 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_CONVI_FROMD (SLJIT_FOP1_BASE + 3)
-#define SLJIT_CONVI_FROMS (SLJIT_CONVI_FROMD | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_S32_FROM_F64 (SLJIT_FOP1_BASE + 3)
+#define SLJIT_CONV_S32_FROM_F32 (SLJIT_CONV_S32_FROM_F64 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMW (SLJIT_FOP1_BASE + 4)
-#define SLJIT_CONVS_FROMW (SLJIT_CONVD_FROMW | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_SW (SLJIT_FOP1_BASE + 4)
+#define SLJIT_CONV_F32_FROM_SW (SLJIT_CONV_F64_FROM_SW | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMI (SLJIT_FOP1_BASE + 5)
-#define SLJIT_CONVS_FROMI (SLJIT_CONVD_FROMI | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_S32 (SLJIT_FOP1_BASE + 5)
+#define SLJIT_CONV_F32_FROM_S32 (SLJIT_CONV_F64_FROM_S32 | SLJIT_F32_OP)
/* Note: dst is the left and src is the right operand for SLJIT_CMPD.
Note: NaN check is always performed. If SLJIT_C_FLOAT_UNORDERED flag
is set, the comparison result is unpredictable.
Flags: SP | E | S (see SLJIT_C_FLOAT_*) */
-#define SLJIT_DCMP (SLJIT_FOP1_BASE + 6)
-#define SLJIT_SCMP (SLJIT_DCMP | SLJIT_SINGLE_OP)
+#define SLJIT_CMP_F64 (SLJIT_FOP1_BASE + 6)
+#define SLJIT_CMP_F32 (SLJIT_CMP_F64 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_DNEG (SLJIT_FOP1_BASE + 7)
-#define SLJIT_SNEG (SLJIT_DNEG | SLJIT_SINGLE_OP)
+#define SLJIT_NEG_F64 (SLJIT_FOP1_BASE + 7)
+#define SLJIT_NEG_F32 (SLJIT_NEG_F64 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_DABS (SLJIT_FOP1_BASE + 8)
-#define SLJIT_SABS (SLJIT_DABS | SLJIT_SINGLE_OP)
+#define SLJIT_ABS_F64 (SLJIT_FOP1_BASE + 8)
+#define SLJIT_ABS_F32 (SLJIT_ABS_F64 | SLJIT_F32_OP)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
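A hedged sketch of the renamed fop1 codes, assuming SLJIT_FR0 and SLJIT_FR1 are available floating point scratch registers in the current configuration:

    /* Signed machine word -> double (was SLJIT_CONVD_FROMW). */
    sljit_emit_fop1(compiler, SLJIT_CONV_F64_FROM_SW, SLJIT_FR0, 0, SLJIT_R0, 0);

    /* Absolute value of that double (was SLJIT_DABS). */
    sljit_emit_fop1(compiler, SLJIT_ABS_F64, SLJIT_FR0, 0, SLJIT_FR0, 0);

    /* Narrow it to single precision in FR1 (was SLJIT_CONVS_FROMD). */
    sljit_emit_fop1(compiler, SLJIT_CONV_F32_FROM_F64, SLJIT_FR1, 0, SLJIT_FR0, 0);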
@@ -918,17 +914,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
#define SLJIT_FOP2_BASE 160
/* Flags: SP - (never set any flags) */
-#define SLJIT_DADD (SLJIT_FOP2_BASE + 0)
-#define SLJIT_SADD (SLJIT_DADD | SLJIT_SINGLE_OP)
+#define SLJIT_ADD_F64 (SLJIT_FOP2_BASE + 0)
+#define SLJIT_ADD_F32 (SLJIT_ADD_F64 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_DSUB (SLJIT_FOP2_BASE + 1)
-#define SLJIT_SSUB (SLJIT_DSUB | SLJIT_SINGLE_OP)
+#define SLJIT_SUB_F64 (SLJIT_FOP2_BASE + 1)
+#define SLJIT_SUB_F32 (SLJIT_SUB_F64 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_DMUL (SLJIT_FOP2_BASE + 2)
-#define SLJIT_SMUL (SLJIT_DMUL | SLJIT_SINGLE_OP)
+#define SLJIT_MUL_F64 (SLJIT_FOP2_BASE + 2)
+#define SLJIT_MUL_F32 (SLJIT_MUL_F64 | SLJIT_F32_OP)
/* Flags: SP - (never set any flags) */
-#define SLJIT_DDIV (SLJIT_FOP2_BASE + 3)
-#define SLJIT_SDIV (SLJIT_DDIV | SLJIT_SINGLE_OP)
+#define SLJIT_DIV_F64 (SLJIT_FOP2_BASE + 3)
+#define SLJIT_DIV_F32 (SLJIT_DIV_F64 | SLJIT_F32_OP)
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
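A hedged sketch of the renamed fop2 codes; FR0 and FR1 are again assumed to hold values of the matching precision:

    /* Single precision add: FR0 = FR0 + FR1 (was SLJIT_SADD). */
    sljit_emit_fop2(compiler, SLJIT_ADD_F32, SLJIT_FR0, 0, SLJIT_FR0, 0, SLJIT_FR1, 0);

    /* Double precision divide: FR0 = FR0 / FR1 (was SLJIT_DDIV). */
    sljit_emit_fop2(compiler, SLJIT_DIV_F64, SLJIT_FR0, 0, SLJIT_FR0, 0, SLJIT_FR1, 0);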
@@ -943,58 +939,58 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi
/* Integer comparison types. */
#define SLJIT_EQUAL 0
-#define SLJIT_I_EQUAL (SLJIT_EQUAL | SLJIT_I32_OP)
+#define SLJIT_EQUAL32 (SLJIT_EQUAL | SLJIT_I32_OP)
#define SLJIT_ZERO 0
-#define SLJIT_I_ZERO (SLJIT_ZERO | SLJIT_I32_OP)
+#define SLJIT_ZERO32 (SLJIT_ZERO | SLJIT_I32_OP)
#define SLJIT_NOT_EQUAL 1
-#define SLJIT_I_NOT_EQUAL (SLJIT_NOT_EQUAL | SLJIT_I32_OP)
+#define SLJIT_NOT_EQUAL32 (SLJIT_NOT_EQUAL | SLJIT_I32_OP)
#define SLJIT_NOT_ZERO 1
-#define SLJIT_I_NOT_ZERO (SLJIT_NOT_ZERO | SLJIT_I32_OP)
+#define SLJIT_NOT_ZERO32 (SLJIT_NOT_ZERO | SLJIT_I32_OP)
#define SLJIT_LESS 2
-#define SLJIT_I_LESS (SLJIT_LESS | SLJIT_I32_OP)
+#define SLJIT_LESS32 (SLJIT_LESS | SLJIT_I32_OP)
#define SLJIT_GREATER_EQUAL 3
-#define SLJIT_I_GREATER_EQUAL (SLJIT_GREATER_EQUAL | SLJIT_I32_OP)
+#define SLJIT_GREATER_EQUAL32 (SLJIT_GREATER_EQUAL | SLJIT_I32_OP)
#define SLJIT_GREATER 4
-#define SLJIT_I_GREATER (SLJIT_GREATER | SLJIT_I32_OP)
+#define SLJIT_GREATER32 (SLJIT_GREATER | SLJIT_I32_OP)
#define SLJIT_LESS_EQUAL 5
-#define SLJIT_I_LESS_EQUAL (SLJIT_LESS_EQUAL | SLJIT_I32_OP)
+#define SLJIT_LESS_EQUAL32 (SLJIT_LESS_EQUAL | SLJIT_I32_OP)
#define SLJIT_SIG_LESS 6
-#define SLJIT_I_SIG_LESS (SLJIT_SIG_LESS | SLJIT_I32_OP)
+#define SLJIT_SIG_LESS32 (SLJIT_SIG_LESS | SLJIT_I32_OP)
#define SLJIT_SIG_GREATER_EQUAL 7
-#define SLJIT_I_SIG_GREATER_EQUAL (SLJIT_SIG_GREATER_EQUAL | SLJIT_I32_OP)
+#define SLJIT_SIG_GREATER_EQUAL32 (SLJIT_SIG_GREATER_EQUAL | SLJIT_I32_OP)
#define SLJIT_SIG_GREATER 8
-#define SLJIT_I_SIG_GREATER (SLJIT_SIG_GREATER | SLJIT_I32_OP)
+#define SLJIT_SIG_GREATER32 (SLJIT_SIG_GREATER | SLJIT_I32_OP)
#define SLJIT_SIG_LESS_EQUAL 9
-#define SLJIT_I_SIG_LESS_EQUAL (SLJIT_SIG_LESS_EQUAL | SLJIT_I32_OP)
+#define SLJIT_SIG_LESS_EQUAL32 (SLJIT_SIG_LESS_EQUAL | SLJIT_I32_OP)
#define SLJIT_OVERFLOW 10
-#define SLJIT_I_OVERFLOW (SLJIT_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_OVERFLOW32 (SLJIT_OVERFLOW | SLJIT_I32_OP)
#define SLJIT_NOT_OVERFLOW 11
-#define SLJIT_I_NOT_OVERFLOW (SLJIT_NOT_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_NOT_OVERFLOW32 (SLJIT_NOT_OVERFLOW | SLJIT_I32_OP)
#define SLJIT_MUL_OVERFLOW 12
-#define SLJIT_I_MUL_OVERFLOW (SLJIT_MUL_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_MUL_OVERFLOW32 (SLJIT_MUL_OVERFLOW | SLJIT_I32_OP)
#define SLJIT_MUL_NOT_OVERFLOW 13
-#define SLJIT_I_MUL_NOT_OVERFLOW (SLJIT_MUL_NOT_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_MUL_NOT_OVERFLOW32 (SLJIT_MUL_NOT_OVERFLOW | SLJIT_I32_OP)
/* Floating point comparison types. */
-#define SLJIT_D_EQUAL 14
-#define SLJIT_S_EQUAL (SLJIT_D_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_NOT_EQUAL 15
-#define SLJIT_S_NOT_EQUAL (SLJIT_D_NOT_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_LESS 16
-#define SLJIT_S_LESS (SLJIT_D_LESS | SLJIT_SINGLE_OP)
-#define SLJIT_D_GREATER_EQUAL 17
-#define SLJIT_S_GREATER_EQUAL (SLJIT_D_GREATER_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_GREATER 18
-#define SLJIT_S_GREATER (SLJIT_D_GREATER | SLJIT_SINGLE_OP)
-#define SLJIT_D_LESS_EQUAL 19
-#define SLJIT_S_LESS_EQUAL (SLJIT_D_LESS_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_UNORDERED 20
-#define SLJIT_S_UNORDERED (SLJIT_D_UNORDERED | SLJIT_SINGLE_OP)
-#define SLJIT_D_ORDERED 21
-#define SLJIT_S_ORDERED (SLJIT_D_ORDERED | SLJIT_SINGLE_OP)
+#define SLJIT_EQUAL_F64 14
+#define SLJIT_EQUAL_F32 (SLJIT_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_NOT_EQUAL_F64 15
+#define SLJIT_NOT_EQUAL_F32 (SLJIT_NOT_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_LESS_F64 16
+#define SLJIT_LESS_F32 (SLJIT_LESS_F64 | SLJIT_F32_OP)
+#define SLJIT_GREATER_EQUAL_F64 17
+#define SLJIT_GREATER_EQUAL_F32 (SLJIT_GREATER_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_GREATER_F64 18
+#define SLJIT_GREATER_F32 (SLJIT_GREATER_F64 | SLJIT_F32_OP)
+#define SLJIT_LESS_EQUAL_F64 19
+#define SLJIT_LESS_EQUAL_F32 (SLJIT_LESS_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_UNORDERED_F64 20
+#define SLJIT_UNORDERED_F32 (SLJIT_UNORDERED_F64 | SLJIT_F32_OP)
+#define SLJIT_ORDERED_F64 21
+#define SLJIT_ORDERED_F32 (SLJIT_ORDERED_F64 | SLJIT_F32_OP)
/* Unconditional jump types. */
#define SLJIT_JUMP 22
@@ -1033,7 +1029,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler
sljit_emit_jump. However some architectures (i.e: MIPS) may employ
special optimizations here. It is suggested to use this comparison form
when appropriate.
- type must be between SLJIT_D_EQUAL and SLJIT_S_ORDERED
+ type must be between SLJIT_EQUAL_F64 and SLJIT_ORDERED_F32
type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
Flags: destroy flags.
Note: if either operand is NaN, the behaviour is undefined for
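A hedged sketch of a floating point compare-and-jump with the renamed *_F64 comparison types:

    /* Jump when the double in FR0 is less than the double in FR1
       (SLJIT_LESS_F64 was SLJIT_D_LESS). */
    struct sljit_jump *jump = sljit_emit_fcmp(compiler, SLJIT_LESS_F64,
        SLJIT_FR0, 0, SLJIT_FR1, 0);
    /* later: sljit_set_label(jump, target_label); */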
diff --git a/sljit/sljitNativeARM_32.c b/sljit/sljitNativeARM_32.c
index beb7b6c..b92808f 100644
--- a/sljit/sljitNativeARM_32.c
+++ b/sljit/sljitNativeARM_32.c
@@ -1817,49 +1817,49 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
case SLJIT_NOP:
FAIL_IF(push_inst(compiler, NOP));
break;
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
- return push_inst(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+ return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
| (reg_map[SLJIT_R1] << 16)
| (reg_map[SLJIT_R0] << 12)
| (reg_map[SLJIT_R0] << 8)
| reg_map[SLJIT_R1]);
#else
FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, SLJIT_UNUSED, RM(SLJIT_R1))));
- return push_inst(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+ return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
| (reg_map[SLJIT_R1] << 16)
| (reg_map[SLJIT_R0] << 12)
| (reg_map[SLJIT_R0] << 8)
| reg_map[TMP_REG1]);
#endif
- case SLJIT_UDIVMOD:
- case SLJIT_SDIVMOD:
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
- SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
+ SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
SLJIT_COMPILE_ASSERT(reg_map[2] == 1 && reg_map[3] == 2, bad_register_mapping);
- if ((op >= SLJIT_UDIVI) && (compiler->scratches >= 3)) {
+ if ((op >= SLJIT_DIV_UW) && (compiler->scratches >= 3)) {
FAIL_IF(push_inst(compiler, 0xe52d2008 /* str r2, [sp, #-8]! */));
FAIL_IF(push_inst(compiler, 0xe58d1004 /* str r1, [sp, #4] */));
}
- else if ((op >= SLJIT_UDIVI) || (compiler->scratches >= 3))
- FAIL_IF(push_inst(compiler, 0xe52d0008 | (op >= SLJIT_UDIVI ? 0x1000 : 0x2000) /* str r1/r2, [sp, #-8]! */));
+ else if ((op >= SLJIT_DIV_UW) || (compiler->scratches >= 3))
+ FAIL_IF(push_inst(compiler, 0xe52d0008 | (op >= SLJIT_DIV_UW ? 0x1000 : 0x2000) /* str r1/r2, [sp, #-8]! */));
#if defined(__GNUC__)
FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
- ((op | 0x2) == SLJIT_UDIVI ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
+ ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
#else
#error "Software divmod functions are needed"
#endif
- if ((op >= SLJIT_UDIVI) && (compiler->scratches >= 3)) {
+ if ((op >= SLJIT_DIV_UW) && (compiler->scratches >= 3)) {
FAIL_IF(push_inst(compiler, 0xe59d1004 /* ldr r1, [sp, #4] */));
FAIL_IF(push_inst(compiler, 0xe49d2008 /* ldr r2, [sp], #8 */));
}
- else if ((op >= SLJIT_UDIVI) || (compiler->scratches >= 3))
- return push_inst(compiler, 0xe49d0008 | (op >= SLJIT_UDIVI ? 0x1000 : 0x2000) /* ldr r1/r2, [sp], #8 */);
+ else if ((op >= SLJIT_DIV_UW) || (compiler->scratches >= 3))
+ return push_inst(compiler, 0xe49d0008 | (op >= SLJIT_DIV_UW ? 0x1000 : 0x2000) /* ldr r1/r2, [sp], #8 */);
return SLJIT_SUCCESS;
}
@@ -2044,7 +2044,7 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
{
sljit_sw tmp;
sljit_uw imm;
- sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD));
+ sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD));
SLJIT_ASSERT(arg & SLJIT_MEM);
if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
@@ -2104,16 +2104,16 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, 0));
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
if (src & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
src = TMP_FREG1;
}
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_SINGLE_OP, TMP_FREG1, src, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_F32_OP, TMP_FREG1, src, 0)));
if (dst == SLJIT_UNUSED)
return SLJIT_SUCCESS;
@@ -2125,7 +2125,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -2142,10 +2142,10 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (TMP_FREG1 << 16)));
}
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_SINGLE_OP, dst_r, TMP_FREG1, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_F32_OP, dst_r, TMP_FREG1, 0)));
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
return SLJIT_SUCCESS;
}
@@ -2154,16 +2154,16 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_s32 src2, sljit_sw src2w)
{
if (src1 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_SINGLE_OP, src1, src2, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_F32_OP, src1, src2, 0)));
return push_inst(compiler, VMRS);
}
@@ -2176,42 +2176,42 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
CHECK_ERROR();
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- if (GET_OPCODE(op) != SLJIT_CONVD_FROMS)
- op ^= SLJIT_SINGLE_OP;
+ if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
+ op ^= SLJIT_F32_OP;
- SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_r, src, srcw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw));
src = dst_r;
}
switch (GET_OPCODE(op)) {
- case SLJIT_DMOV:
+ case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
else
dst_r = src;
}
break;
- case SLJIT_DNEG:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+ case SLJIT_NEG_F64:
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
break;
- case SLJIT_DABS:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+ case SLJIT_ABS_F64:
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
break;
- case SLJIT_CONVD_FROMS:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
- op ^= SLJIT_SINGLE_OP;
+ case SLJIT_CONV_F64_FROM_F32:
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
+ op ^= SLJIT_F32_OP;
break;
}
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), dst_r, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw);
return SLJIT_SUCCESS;
}
@@ -2230,40 +2230,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- op ^= SLJIT_SINGLE_OP;
+ op ^= SLJIT_F32_OP;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src2 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
if (src1 & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
switch (GET_OPCODE(op)) {
- case SLJIT_DADD:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+ case SLJIT_ADD_F64:
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
break;
- case SLJIT_DSUB:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+ case SLJIT_SUB_F64:
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
break;
- case SLJIT_DMUL:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+ case SLJIT_MUL_F64:
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
break;
- case SLJIT_DDIV:
- FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+ case SLJIT_DIV_F64:
+ FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
break;
}
if (dst_r == TMP_FREG1)
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw));
return SLJIT_SUCCESS;
}
@@ -2331,28 +2331,28 @@ static sljit_uw get_cc(sljit_s32 type)
switch (type) {
case SLJIT_EQUAL:
case SLJIT_MUL_NOT_OVERFLOW:
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
return 0x00000000;
case SLJIT_NOT_EQUAL:
case SLJIT_MUL_OVERFLOW:
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
return 0x10000000;
case SLJIT_LESS:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
return 0x30000000;
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
return 0x20000000;
case SLJIT_GREATER:
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
return 0x80000000;
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
return 0x90000000;
case SLJIT_SIG_LESS:
@@ -2368,11 +2368,11 @@ static sljit_uw get_cc(sljit_s32 type)
return 0xd0000000;
case SLJIT_OVERFLOW:
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
return 0x60000000;
case SLJIT_NOT_OVERFLOW:
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
return 0x70000000;
default:
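
The ARM_32 hunks above replace SLJIT_SINGLE_OP with SLJIT_F32_OP and the SLJIT_D* arithmetic opcodes with their *_F64 spellings, while the emitted VFP instructions stay the same. A minimal caller-side sketch of the renamed sljit_emit_fop2 interface follows; it assumes an already initialised struct sljit_compiler *compiler and the public SLJIT_FR0/SLJIT_FR1 float register names, which do not appear in these hunks:

    /* Sketch only: "compiler" and the SLJIT_FR0/SLJIT_FR1 names are assumed
       from the public sljit API, not shown in this diff. */
    /* dst = src1 + src2 on 64 bit doubles, using the renamed opcode. */
    sljit_emit_fop2(compiler, SLJIT_ADD_F64,
        SLJIT_FR0, 0, SLJIT_FR0, 0, SLJIT_FR1, 0);

    /* Single precision: the backends test the SLJIT_F32_OP bit in the opcode
       (pinned to 0x100 by the compile-time asserts later in this patch),
       so the F32 form is the F64 opcode with that bit OR-ed in. */
    sljit_emit_fop2(compiler, SLJIT_ADD_F64 | SLJIT_F32_OP,
        SLJIT_FR0, 0, SLJIT_FR0, 0, SLJIT_FR1, 0);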
diff --git a/sljit/sljitNativeARM_64.c b/sljit/sljitNativeARM_64.c
index 91dad5e..d995851 100644
--- a/sljit/sljitNativeARM_64.c
+++ b/sljit/sljitNativeARM_64.c
@@ -1256,20 +1256,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return push_inst(compiler, BRK);
case SLJIT_NOP:
return push_inst(compiler, NOP);
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
FAIL_IF(push_inst(compiler, MADD | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
- return push_inst(compiler, (op == SLJIT_LUMUL ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
- case SLJIT_UDIVMOD:
- case SLJIT_SDIVMOD:
+ return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
- FAIL_IF(push_inst(compiler, ((op == SLJIT_UDIVMOD ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
+ FAIL_IF(push_inst(compiler, ((op == SLJIT_DIVMOD_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
FAIL_IF(push_inst(compiler, (MADD ^ inv_bits) | RD(SLJIT_R1) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
return push_inst(compiler, (SUB ^ inv_bits) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
- return push_inst(compiler, ((op == SLJIT_UDIVI ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1));
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
+ return push_inst(compiler, ((op == SLJIT_DIV_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1));
}
return SLJIT_SUCCESS;
@@ -1600,44 +1600,44 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
return push_inst(compiler, STR_FI | ins_bits | VT(reg) | RN(TMP_REG3));
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
- sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+ sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
- if (GET_OPCODE(op) == SLJIT_CONVI_FROMD)
+ if (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64)
inv_bits |= (1 << 31);
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
+ emit_fop_mem(compiler, (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
src = TMP_FREG1;
}
FAIL_IF(push_inst(compiler, (FCVTZS ^ inv_bits) | RD(dst_r) | VN(src)));
if (dst_r == TMP_REG1 && dst != SLJIT_UNUSED)
- return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONVI_FROMD) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw);
+ return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw);
return SLJIT_SUCCESS;
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+ sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
inv_bits |= (1 << 31);
if (src & SLJIT_MEM) {
- emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONVD_FROMI) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw);
+ emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw);
src = TMP_REG1;
} else if (src & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
srcw = (sljit_s32)srcw;
#endif
FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
@@ -1647,7 +1647,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
FAIL_IF(push_inst(compiler, (SCVTF ^ inv_bits) | VD(dst_r) | RN(src)));
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, ((op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, ((op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
return SLJIT_SUCCESS;
}
@@ -1655,8 +1655,8 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- sljit_s32 mem_flags = (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE;
- sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+ sljit_s32 mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
+ sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
if (src1 & SLJIT_MEM) {
emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
@@ -1675,7 +1675,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
- sljit_s32 dst_r, mem_flags = (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE;
+ sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
sljit_ins inv_bits;
CHECK_ERROR();
@@ -1685,16 +1685,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x100) == WORD_SIZE, must_be_one_bit_difference);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
- inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+ inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONVD_FROMS) ? (mem_flags ^ 0x100) : mem_flags, dst_r, src, srcw);
+ emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x100) : mem_flags, dst_r, src, srcw);
src = dst_r;
}
switch (GET_OPCODE(op)) {
- case SLJIT_DMOV:
+ case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
FAIL_IF(push_inst(compiler, (FMOV ^ inv_bits) | VD(dst_r) | VN(src)));
@@ -1702,14 +1702,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
dst_r = src;
}
break;
- case SLJIT_DNEG:
+ case SLJIT_NEG_F64:
FAIL_IF(push_inst(compiler, (FNEG ^ inv_bits) | VD(dst_r) | VN(src)));
break;
- case SLJIT_DABS:
+ case SLJIT_ABS_F64:
FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src)));
break;
- case SLJIT_CONVD_FROMS:
- FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_SINGLE_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
+ case SLJIT_CONV_F64_FROM_F32:
+ FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_F32_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
break;
}
@@ -1723,8 +1723,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
- sljit_s32 dst_r, mem_flags = (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE;
- sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+ sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
+ sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
CHECK_ERROR();
CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
@@ -1746,16 +1746,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
}
switch (GET_OPCODE(op)) {
- case SLJIT_DADD:
+ case SLJIT_ADD_F64:
FAIL_IF(push_inst(compiler, (FADD ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
break;
- case SLJIT_DSUB:
+ case SLJIT_SUB_F64:
FAIL_IF(push_inst(compiler, (FSUB ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
break;
- case SLJIT_DMUL:
+ case SLJIT_MUL_F64:
FAIL_IF(push_inst(compiler, (FMUL ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
break;
- case SLJIT_DDIV:
+ case SLJIT_DIV_F64:
FAIL_IF(push_inst(compiler, (FDIV ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
break;
}
@@ -1811,28 +1811,28 @@ static sljit_uw get_cc(sljit_s32 type)
switch (type) {
case SLJIT_EQUAL:
case SLJIT_MUL_NOT_OVERFLOW:
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
return 0x1;
case SLJIT_NOT_EQUAL:
case SLJIT_MUL_OVERFLOW:
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
return 0x0;
case SLJIT_LESS:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
return 0x2;
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
return 0x3;
case SLJIT_GREATER:
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
return 0x9;
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
return 0x8;
case SLJIT_SIG_LESS:
@@ -1848,11 +1848,11 @@ static sljit_uw get_cc(sljit_s32 type)
return 0xc;
case SLJIT_OVERFLOW:
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
return 0x7;
case SLJIT_NOT_OVERFLOW:
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
return 0x6;
default:
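
Alongside the arithmetic opcodes, the ARM_64 hunks rename the conversion helpers (sljit_emit_fop1_conv_sw_from_f64, sljit_emit_fop1_conv_f64_from_sw) and their opcodes: SLJIT_CONVW_FROMD becomes SLJIT_CONV_SW_FROM_F64, SLJIT_CONVD_FROMI becomes SLJIT_CONV_F64_FROM_S32, SLJIT_CONVD_FROMS becomes SLJIT_CONV_F64_FROM_F32, and so on. A sketch of the corresponding sljit_emit_fop1 calls, again assuming an existing compiler and the public SLJIT_FR0/SLJIT_FR1 names (SLJIT_R0 does appear in the hunks, the float register names do not):

    /* Truncate the double in FR0 to a signed machine word in R0. */
    sljit_emit_fop1(compiler, SLJIT_CONV_SW_FROM_F64,
        SLJIT_R0, 0, SLJIT_FR0, 0);

    /* Widen the single precision value in FR1 to a double in FR0. */
    sljit_emit_fop1(compiler, SLJIT_CONV_F64_FROM_F32,
        SLJIT_FR0, 0, SLJIT_FR1, 0);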
diff --git a/sljit/sljitNativeARM_T2_32.c b/sljit/sljitNativeARM_T2_32.c
index 22f19bd..1ed44a8 100644
--- a/sljit/sljitNativeARM_T2_32.c
+++ b/sljit/sljitNativeARM_T2_32.c
@@ -1251,18 +1251,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return push_inst16(compiler, BKPT);
case SLJIT_NOP:
return push_inst16(compiler, NOP);
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
- return push_inst32(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
+ return push_inst32(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
| (reg_map[SLJIT_R1] << 8)
| (reg_map[SLJIT_R0] << 12)
| (reg_map[SLJIT_R0] << 16)
| reg_map[SLJIT_R1]);
- case SLJIT_UDIVMOD:
- case SLJIT_SDIVMOD:
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
- SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
+ SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
SLJIT_COMPILE_ASSERT(reg_map[2] == 1 && reg_map[3] == 2 && reg_map[4] == 12, bad_register_mapping);
saved_reg_count = 0;
@@ -1270,7 +1270,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
saved_reg_list[saved_reg_count++] = 12;
if (compiler->scratches >= 3)
saved_reg_list[saved_reg_count++] = 2;
- if (op >= SLJIT_UDIVI)
+ if (op >= SLJIT_DIV_UW)
saved_reg_list[saved_reg_count++] = 1;
if (saved_reg_count > 0) {
@@ -1288,7 +1288,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#if defined(__GNUC__)
FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
- ((op | 0x2) == SLJIT_UDIVI ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
+ ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
#else
#error "Software divmod functions are needed"
#endif
@@ -1566,7 +1566,7 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
{
sljit_sw tmp;
sljit_uw imm;
- sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD));
+ sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD));
SLJIT_ASSERT(arg & SLJIT_MEM);
@@ -1626,16 +1626,16 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg));
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
if (src & SLJIT_MEM) {
- FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
src = TMP_FREG1;
}
- FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_SINGLE_OP) | DD4(TMP_FREG1) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_F32_OP) | DD4(TMP_FREG1) | DM4(src)));
if (dst == SLJIT_UNUSED)
return SLJIT_SUCCESS;
@@ -1647,7 +1647,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -1664,10 +1664,10 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | DN4(TMP_FREG1)));
}
- FAIL_IF(push_inst32(compiler, VCVT_F32_S32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, VCVT_F32_S32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(TMP_FREG1)));
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
return SLJIT_SUCCESS;
}
@@ -1676,16 +1676,16 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_s32 src2, sljit_sw src2w)
{
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
+ emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
+ emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
src2 = TMP_FREG2;
}
- FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_SINGLE_OP) | DD4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_F32_OP) | DD4(src1) | DM4(src2)));
return push_inst32(compiler, VMRS);
}
@@ -1698,42 +1698,42 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
CHECK_ERROR();
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- if (GET_OPCODE(op) != SLJIT_CONVD_FROMS)
- op ^= SLJIT_SINGLE_OP;
+ if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
+ op ^= SLJIT_F32_OP;
- SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_r, src, srcw);
+ emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw);
src = dst_r;
}
switch (GET_OPCODE(op)) {
- case SLJIT_DMOV:
+ case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
- FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
else
dst_r = src;
}
break;
- case SLJIT_DNEG:
- FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
+ case SLJIT_NEG_F64:
+ FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
break;
- case SLJIT_DABS:
- FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
+ case SLJIT_ABS_F64:
+ FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
break;
- case SLJIT_CONVD_FROMS:
- FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
- op ^= SLJIT_SINGLE_OP;
+ case SLJIT_CONV_F64_FROM_F32:
+ FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
+ op ^= SLJIT_F32_OP;
break;
}
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), dst_r, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw);
return SLJIT_SUCCESS;
}
@@ -1752,36 +1752,36 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- op ^= SLJIT_SINGLE_OP;
+ op ^= SLJIT_F32_OP;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
+ emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
+ emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
src2 = TMP_FREG2;
}
switch (GET_OPCODE(op)) {
- case SLJIT_DADD:
- FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ case SLJIT_ADD_F64:
+ FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
break;
- case SLJIT_DSUB:
- FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ case SLJIT_SUB_F64:
+ FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
break;
- case SLJIT_DMUL:
- FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ case SLJIT_MUL_F64:
+ FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
break;
- case SLJIT_DDIV:
- FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ case SLJIT_DIV_F64:
+ FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
break;
}
if (!(dst & SLJIT_MEM))
return SLJIT_SUCCESS;
- return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
}
#undef FPU_LOAD
@@ -1845,28 +1845,28 @@ static sljit_uw get_cc(sljit_s32 type)
switch (type) {
case SLJIT_EQUAL:
case SLJIT_MUL_NOT_OVERFLOW:
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
return 0x0;
case SLJIT_NOT_EQUAL:
case SLJIT_MUL_OVERFLOW:
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
return 0x1;
case SLJIT_LESS:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
return 0x3;
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
return 0x2;
case SLJIT_GREATER:
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
return 0x8;
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
return 0x9;
case SLJIT_SIG_LESS:
@@ -1882,11 +1882,11 @@ static sljit_uw get_cc(sljit_s32 type)
return 0xd;
case SLJIT_OVERFLOW:
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
return 0x6;
case SLJIT_NOT_OVERFLOW:
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
return 0x7;
default: /* SLJIT_JUMP */
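
The sljit_emit_op0 hunks in this and the surrounding files rename the multiply/divide opcodes: SLJIT_LUMUL/SLJIT_LSMUL become SLJIT_LMUL_UW/SLJIT_LMUL_SW, SLJIT_UDIVMOD/SLJIT_SDIVMOD become SLJIT_DIVMOD_UW/SLJIT_DIVMOD_SW, and SLJIT_UDIVI/SLJIT_SDIVI become SLJIT_DIV_UW/SLJIT_DIV_SW. The MIPS and PPC hunks that follow (MFLO/MFHI, SUBF) show the register convention is unchanged. A caller-side sketch, assuming an existing compiler:

    /* Word sized unsigned multiply of R0 by R1:
       the low half of the product is left in R0, the high half in R1. */
    sljit_emit_op0(compiler, SLJIT_LMUL_UW);

    /* Unsigned divide of R0 by R1:
       the quotient is left in R0, the remainder in R1. */
    sljit_emit_op0(compiler, SLJIT_DIVMOD_UW);

    /* The same division when only the quotient is needed. */
    sljit_emit_op0(compiler, SLJIT_DIV_UW);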
diff --git a/sljit/sljitNativeMIPS_common.c b/sljit/sljitNativeMIPS_common.c
index e13212b..c2c251b 100644
--- a/sljit/sljitNativeMIPS_common.c
+++ b/sljit/sljitNativeMIPS_common.c
@@ -1044,20 +1044,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return push_inst(compiler, BREAK, UNMOVABLE_INS);
case SLJIT_NOP:
return push_inst(compiler, NOP, UNMOVABLE_INS);
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
- FAIL_IF(push_inst(compiler, (op == SLJIT_LUMUL ? DMULTU : DMULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? DMULTU : DMULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
#else
- FAIL_IF(push_inst(compiler, (op == SLJIT_LUMUL ? MULTU : MULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? MULTU : MULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
#endif
FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_R0), DR(SLJIT_R0)));
return push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1));
- case SLJIT_UDIVMOD:
- case SLJIT_SDIVMOD:
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
- SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
+ SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
#if !(defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1)
FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
@@ -1065,15 +1065,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
if (int_op)
- FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
else
- FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? DDIVU : DDIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DDIVU : DDIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
#else
- FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
#endif
FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_R0), DR(SLJIT_R0)));
- return (op >= SLJIT_UDIVI) ? SLJIT_SUCCESS : push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1));
+ return (op >= SLJIT_DIV_UW) ? SLJIT_SUCCESS : push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1));
}
return SLJIT_SUCCESS;
@@ -1286,17 +1286,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void)
#endif
}
-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 7))
-#define FMT(op) (((op & SLJIT_SINGLE_OP) ^ SLJIT_SINGLE_OP) << (21 - 8))
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7))
+#define FMT(op) (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) << (21 - 8))
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
# define flags 0
#else
- sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONVW_FROMD) << 21;
+ sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64) << 21;
#endif
if (src & SLJIT_MEM) {
@@ -1322,14 +1322,14 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
#endif
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
# define flags 0
#else
- sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONVD_FROMW) << 21;
+ sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW) << 21;
#endif
sljit_s32 dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
@@ -1342,14 +1342,14 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
}
else {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
srcw = (sljit_s32)srcw;
#endif
FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw));
FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG1) | FS(TMP_FREG1), MOVABLE_INS));
}
- FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | (((op & SLJIT_SINGLE_OP) ^ SLJIT_SINGLE_OP) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
+ FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
if (dst & SLJIT_MEM)
return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
@@ -1409,11 +1409,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
- op ^= SLJIT_SINGLE_OP;
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+ op ^= SLJIT_F32_OP;
dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
@@ -1425,7 +1425,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
src <<= 1;
switch (GET_OPCODE(op)) {
- case SLJIT_DMOV:
+ case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
FAIL_IF(push_inst(compiler, MOV_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
@@ -1433,15 +1433,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
dst_r = src;
}
break;
- case SLJIT_DNEG:
+ case SLJIT_NEG_F64:
FAIL_IF(push_inst(compiler, NEG_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
break;
- case SLJIT_DABS:
+ case SLJIT_ABS_F64:
FAIL_IF(push_inst(compiler, ABS_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
break;
- case SLJIT_CONVD_FROMS:
- FAIL_IF(push_inst(compiler, CVT_S_S | ((op & SLJIT_SINGLE_OP) ? 1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS));
- op ^= SLJIT_SINGLE_OP;
+ case SLJIT_CONV_F64_FROM_F32:
+ FAIL_IF(push_inst(compiler, CVT_S_S | ((op & SLJIT_F32_OP) ? 1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS));
+ op ^= SLJIT_F32_OP;
break;
}
@@ -1509,19 +1509,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
src2 = TMP_FREG2;
switch (GET_OPCODE(op)) {
- case SLJIT_DADD:
+ case SLJIT_ADD_F64:
FAIL_IF(push_inst(compiler, ADD_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
- case SLJIT_DSUB:
+ case SLJIT_SUB_F64:
FAIL_IF(push_inst(compiler, SUB_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
- case SLJIT_DMUL:
+ case SLJIT_MUL_F64:
FAIL_IF(push_inst(compiler, MUL_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
- case SLJIT_DDIV:
+ case SLJIT_DIV_F64:
FAIL_IF(push_inst(compiler, DIV_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
break;
}
@@ -1634,27 +1634,27 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
switch (type) {
case SLJIT_EQUAL:
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
BR_NZ(EQUAL_FLAG);
break;
case SLJIT_NOT_EQUAL:
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
BR_Z(EQUAL_FLAG);
break;
case SLJIT_LESS:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
BR_Z(ULESS_FLAG);
break;
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
BR_NZ(ULESS_FLAG);
break;
case SLJIT_GREATER:
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
BR_Z(UGREATER_FLAG);
break;
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
BR_NZ(UGREATER_FLAG);
break;
case SLJIT_SIG_LESS:
@@ -1677,10 +1677,10 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
case SLJIT_MUL_NOT_OVERFLOW:
BR_NZ(OVERFLOW_FLAG);
break;
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
BR_F();
break;
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
BR_T();
break;
default:
@@ -1888,37 +1888,37 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compile
jump->flags |= IS_BIT16_COND;
switch (type & 0xff) {
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
inst = C_UEQ_S;
if_true = 1;
break;
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
inst = C_UEQ_S;
if_true = 0;
break;
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
inst = C_ULT_S;
if_true = 1;
break;
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
inst = C_ULT_S;
if_true = 0;
break;
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
inst = C_ULE_S;
if_true = 0;
break;
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
inst = C_ULE_S;
if_true = 1;
break;
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
inst = C_UN_S;
if_true = 1;
break;
default: /* Make compilers happy. */
SLJIT_ASSERT_STOP();
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
inst = C_UN_S;
if_true = 0;
break;
@@ -2045,14 +2045,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
break;
case SLJIT_LESS:
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_LESS:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_LESS_F64:
+ case SLJIT_GREATER_EQUAL_F64:
dst_ar = ULESS_FLAG;
break;
case SLJIT_GREATER:
case SLJIT_LESS_EQUAL:
- case SLJIT_D_GREATER:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_GREATER_F64:
+ case SLJIT_LESS_EQUAL_F64:
dst_ar = UGREATER_FLAG;
break;
case SLJIT_SIG_LESS:
@@ -2073,13 +2073,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
dst_ar = sugg_dst_ar;
type ^= 0x1; /* Flip type bit for the XORI below. */
break;
- case SLJIT_D_EQUAL:
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_EQUAL_F64:
+ case SLJIT_NOT_EQUAL_F64:
dst_ar = EQUAL_FLAG;
break;
- case SLJIT_D_UNORDERED:
- case SLJIT_D_ORDERED:
+ case SLJIT_UNORDERED_F64:
+ case SLJIT_ORDERED_F64:
FAIL_IF(push_inst(compiler, CFC1 | TA(sugg_dst_ar) | DA(FCSR_REG), sugg_dst_ar));
FAIL_IF(push_inst(compiler, SRL | TA(sugg_dst_ar) | DA(sugg_dst_ar) | SH_IMM(23), sugg_dst_ar));
FAIL_IF(push_inst(compiler, ANDI | SA(sugg_dst_ar) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar));
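
The sljit_emit_fcmp and sljit_emit_jump hunks above carry the same rename over to the floating point comparison types (SLJIT_D_EQUAL becomes SLJIT_EQUAL_F64, SLJIT_D_UNORDERED becomes SLJIT_UNORDERED_F64, and so on). A sketch of a compare-and-branch with the new names; the (type, src1, src1w, src2, src2w) argument order and the sljit_emit_label/sljit_set_label calls are taken from the public API rather than from these hunks:

    struct sljit_jump *jump;

    /* Compare FR0 and FR1 as doubles; the branch is taken when FR0 < FR1
       (on MIPS the C_ULT_S encoding above also takes it for unordered operands). */
    jump = sljit_emit_fcmp(compiler, SLJIT_LESS_F64,
        SLJIT_FR0, 0, SLJIT_FR1, 0);

    /* ... emit the fall-through code, then bind the branch target ... */
    sljit_set_label(jump, sljit_emit_label(compiler));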
diff --git a/sljit/sljitNativePPC_common.c b/sljit/sljitNativePPC_common.c
index f783f56..b213129 100644
--- a/sljit/sljitNativePPC_common.c
+++ b/sljit/sljitNativePPC_common.c
@@ -1257,33 +1257,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
case SLJIT_BREAKPOINT:
case SLJIT_NOP:
return push_inst(compiler, NOP);
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R0)));
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
FAIL_IF(push_inst(compiler, MULLD | D(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R1)));
- return push_inst(compiler, (op == SLJIT_LUMUL ? MULHDU : MULHD) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
+ return push_inst(compiler, (op == SLJIT_LMUL_UW ? MULHDU : MULHD) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
#else
FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R1)));
- return push_inst(compiler, (op == SLJIT_LUMUL ? MULHWU : MULHW) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
+ return push_inst(compiler, (op == SLJIT_LMUL_UW ? MULHWU : MULHW) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
#endif
- case SLJIT_UDIVMOD:
- case SLJIT_SDIVMOD:
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R0)));
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- FAIL_IF(push_inst(compiler, (int_op ? (op == SLJIT_UDIVMOD ? DIVWU : DIVW) : (op == SLJIT_UDIVMOD ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
+ FAIL_IF(push_inst(compiler, (int_op ? (op == SLJIT_DIVMOD_UW ? DIVWU : DIVW) : (op == SLJIT_DIVMOD_UW ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
FAIL_IF(push_inst(compiler, (int_op ? MULLW : MULLD) | D(SLJIT_R1) | A(SLJIT_R0) | B(SLJIT_R1)));
#else
- FAIL_IF(push_inst(compiler, (op == SLJIT_UDIVMOD ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
+ FAIL_IF(push_inst(compiler, (op == SLJIT_DIVMOD_UW ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_R1) | A(SLJIT_R0) | B(SLJIT_R1)));
#endif
return push_inst(compiler, SUBF | D(SLJIT_R1) | A(SLJIT_R1) | B(TMP_REG1));
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
- return push_inst(compiler, (int_op ? (op == SLJIT_UDIVI ? DIVWU : DIVW) : (op == SLJIT_UDIVI ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
+ return push_inst(compiler, (int_op ? (op == SLJIT_DIV_UW ? DIVWU : DIVW) : (op == SLJIT_DIV_UW ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
#else
- return push_inst(compiler, (op == SLJIT_UDIVI ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
+ return push_inst(compiler, (op == SLJIT_DIV_UW ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
#endif
}
@@ -1691,8 +1691,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void)
#endif
}
-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 6))
-#define SELECT_FOP(op, single, double) ((op & SLJIT_SINGLE_OP) ? single : double)
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 6))
+#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
#define FLOAT_TMP_MEM_OFFSET (6 * sizeof(sljit_sw))
@@ -1709,7 +1709,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void)
#endif /* SLJIT_CONFIG_PPC_64 */
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -1721,12 +1721,12 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
op = GET_OPCODE(op);
- FAIL_IF(push_inst(compiler, (op == SLJIT_CONVI_FROMD ? FCTIWZ : FCTIDZ) | FD(TMP_FREG1) | FB(src)));
+ FAIL_IF(push_inst(compiler, (op == SLJIT_CONV_S32_FROM_F64 ? FCTIWZ : FCTIDZ) | FD(TMP_FREG1) | FB(src)));
if (dst == SLJIT_UNUSED)
return SLJIT_SUCCESS;
- if (op == SLJIT_CONVW_FROMD) {
+ if (op == SLJIT_CONV_SW_FROM_F64) {
if (FAST_IS_REG(dst)) {
FAIL_IF(emit_op_mem2(compiler, DOUBLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, 0, 0));
return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, 0, 0);
@@ -1777,7 +1777,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
return push_inst(compiler, STFIWX | FS(TMP_FREG1) | A(dst & REG_MASK) | B(dstw));
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -1786,12 +1786,12 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src & SLJIT_IMM) {
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
srcw = (sljit_s32)srcw;
FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
src = TMP_REG1;
}
- else if (GET_OPCODE(op) == SLJIT_CONVD_FROMI) {
+ else if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) {
if (FAST_IS_REG(src))
FAIL_IF(push_inst(compiler, EXTSW | S(src) | A(TMP_REG1)));
else
@@ -1810,7 +1810,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
if (dst & SLJIT_MEM)
return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
- if (op & SLJIT_SINGLE_OP)
+ if (op & SLJIT_F32_OP)
return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
return SLJIT_SUCCESS;
@@ -1848,7 +1848,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
if (dst & SLJIT_MEM)
return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
- if (op & SLJIT_SINGLE_OP)
+ if (op & SLJIT_F32_OP)
return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
return SLJIT_SUCCESS;
@@ -1882,11 +1882,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
- op ^= SLJIT_SINGLE_OP;
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+ op ^= SLJIT_F32_OP;
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
@@ -1896,14 +1896,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
}
switch (GET_OPCODE(op)) {
- case SLJIT_CONVD_FROMS:
- op ^= SLJIT_SINGLE_OP;
- if (op & SLJIT_SINGLE_OP) {
+ case SLJIT_CONV_F64_FROM_F32:
+ op ^= SLJIT_F32_OP;
+ if (op & SLJIT_F32_OP) {
FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(src)));
break;
}
/* Fall through. */
- case SLJIT_DMOV:
+ case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
FAIL_IF(push_inst(compiler, FMR | FD(dst_r) | FB(src)));
@@ -1911,10 +1911,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
dst_r = src;
}
break;
- case SLJIT_DNEG:
+ case SLJIT_NEG_F64:
FAIL_IF(push_inst(compiler, FNEG | FD(dst_r) | FB(src)));
break;
- case SLJIT_DABS:
+ case SLJIT_ABS_F64:
FAIL_IF(push_inst(compiler, FABS | FD(dst_r) | FB(src)));
break;
}
@@ -1979,19 +1979,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
src2 = TMP_FREG2;
switch (GET_OPCODE(op)) {
- case SLJIT_DADD:
+ case SLJIT_ADD_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADD) | FD(dst_r) | FA(src1) | FB(src2)));
break;
- case SLJIT_DSUB:
+ case SLJIT_SUB_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUB) | FD(dst_r) | FA(src1) | FB(src2)));
break;
- case SLJIT_DMUL:
+ case SLJIT_MUL_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMUL) | FD(dst_r) | FA(src1) | FC(src2) /* FMUL use FC as src2 */));
break;
- case SLJIT_DDIV:
+ case SLJIT_DIV_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIV) | FD(dst_r) | FA(src1) | FB(src2)));
break;
}
@@ -2075,19 +2075,19 @@ static sljit_ins get_bo_bi_flags(sljit_s32 type)
return (4 << 21) | (2 << 16);
case SLJIT_LESS:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
return (12 << 21) | ((4 + 0) << 16);
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
return (4 << 21) | ((4 + 0) << 16);
case SLJIT_GREATER:
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
return (12 << 21) | ((4 + 1) << 16);
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
return (4 << 21) | ((4 + 1) << 16);
case SLJIT_SIG_LESS:
@@ -2110,16 +2110,16 @@ static sljit_ins get_bo_bi_flags(sljit_s32 type)
case SLJIT_MUL_NOT_OVERFLOW:
return (4 << 21) | (3 << 16);
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
return (12 << 21) | ((4 + 2) << 16);
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
return (4 << 21) | ((4 + 2) << 16);
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
return (12 << 21) | ((4 + 3) << 16);
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
return (4 << 21) | ((4 + 3) << 16);
default:
@@ -2255,23 +2255,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
break;
case SLJIT_LESS:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
GET_CR_BIT(4 + 0, reg);
break;
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
GET_CR_BIT(4 + 0, reg);
INVERT_BIT(reg);
break;
case SLJIT_GREATER:
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
GET_CR_BIT(4 + 1, reg);
break;
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
GET_CR_BIT(4 + 1, reg);
INVERT_BIT(reg);
break;
@@ -2305,20 +2305,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
INVERT_BIT(reg);
break;
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
GET_CR_BIT(4 + 2, reg);
break;
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
GET_CR_BIT(4 + 2, reg);
INVERT_BIT(reg);
break;
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
GET_CR_BIT(4 + 3, reg);
break;
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
GET_CR_BIT(4 + 3, reg);
INVERT_BIT(reg);
break;
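
Across the backends this patch is a pure renaming: the SLJIT_COMPILE_ASSERT lines (SLJIT_F32_OP == 0x100, SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW) are unchanged apart from spelling, which suggests the numeric values stay the same. If so, out-of-tree users could bridge old sources during migration with aliases along these lines (purely illustrative, not part of this patch):

    /* Illustrative migration aliases only; every name on both sides is
       taken from the hunks above, but these defines are not in the patch. */
    #define SLJIT_SINGLE_OP    SLJIT_F32_OP
    #define SLJIT_DMOV         SLJIT_MOV_F64
    #define SLJIT_DADD         SLJIT_ADD_F64
    #define SLJIT_DSUB         SLJIT_SUB_F64
    #define SLJIT_DMUL         SLJIT_MUL_F64
    #define SLJIT_DDIV         SLJIT_DIV_F64
    #define SLJIT_D_EQUAL      SLJIT_EQUAL_F64
    #define SLJIT_D_UNORDERED  SLJIT_UNORDERED_F64
    #define SLJIT_UDIVI        SLJIT_DIV_UW
    #define SLJIT_SDIVI        SLJIT_DIV_SW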
diff --git a/sljit/sljitNativeSPARC_common.c b/sljit/sljitNativeSPARC_common.c
index 8811cd7..ec4656f 100644
--- a/sljit/sljitNativeSPARC_common.c
+++ b/sljit/sljitNativeSPARC_common.c
@@ -769,30 +769,30 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
return push_inst(compiler, TA, UNMOVABLE_INS);
case SLJIT_NOP:
return push_inst(compiler, NOP, UNMOVABLE_INS);
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
- FAIL_IF(push_inst(compiler, (op == SLJIT_LUMUL ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
+ FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
return push_inst(compiler, RDY | D(SLJIT_R1), DR(SLJIT_R1));
#else
#error "Implementation required"
#endif
- case SLJIT_UDIVMOD:
- case SLJIT_SDIVMOD:
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
- SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
+ SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
- if ((op | 0x2) == SLJIT_UDIVI)
+ if ((op | 0x2) == SLJIT_DIV_UW)
FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS));
else {
FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_R0) | IMM(31), DR(TMP_REG1)));
FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS));
}
- if (op <= SLJIT_SDIVMOD)
+ if (op <= SLJIT_DIVMOD_SW)
FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_R0), DR(TMP_REG2)));
- FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
- if (op >= SLJIT_UDIVI)
+ FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
+ if (op >= SLJIT_DIV_UW)
return SLJIT_SUCCESS;
FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_R1) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R1)));
return push_inst(compiler, SUB | D(SLJIT_R1) | S1(TMP_REG2) | S2(SLJIT_R1), DR(SLJIT_R1));
@@ -949,11 +949,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void)
#endif
}
-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 7))
-#define SELECT_FOP(op, single, double) ((op & SLJIT_SINGLE_OP) ? single : double)
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7))
+#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)
#define FLOAT_TMP_MEM_OFFSET (22 * sizeof(sljit_sw))
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -978,7 +978,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
return emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0);
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -986,7 +986,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
if (src & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
srcw = (sljit_s32)srcw;
#endif
FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
@@ -1039,11 +1039,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
compiler->cache_arg = 0;
compiler->cache_argw = 0;
- SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
+ SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
- op ^= SLJIT_SINGLE_OP;
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+ op ^= SLJIT_F32_OP;
dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
@@ -1055,30 +1055,30 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
src <<= 1;
switch (GET_OPCODE(op)) {
- case SLJIT_DMOV:
+ case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1) {
FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r) | S2A(src), MOVABLE_INS));
- if (!(op & SLJIT_SINGLE_OP))
+ if (!(op & SLJIT_F32_OP))
FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
}
else
dst_r = src;
}
break;
- case SLJIT_DNEG:
+ case SLJIT_NEG_F64:
FAIL_IF(push_inst(compiler, FNEGS | DA(dst_r) | S2A(src), MOVABLE_INS));
- if (dst_r != src && !(op & SLJIT_SINGLE_OP))
+ if (dst_r != src && !(op & SLJIT_F32_OP))
FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
break;
- case SLJIT_DABS:
+ case SLJIT_ABS_F64:
FAIL_IF(push_inst(compiler, FABSS | DA(dst_r) | S2A(src), MOVABLE_INS));
- if (dst_r != src && !(op & SLJIT_SINGLE_OP))
+ if (dst_r != src && !(op & SLJIT_F32_OP))
FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
break;
- case SLJIT_CONVD_FROMS:
+ case SLJIT_CONV_F64_FROM_F32:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | DA(dst_r) | S2A(src), MOVABLE_INS));
- op ^= SLJIT_SINGLE_OP;
+ op ^= SLJIT_F32_OP;
break;
}
@@ -1146,19 +1146,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
src2 = TMP_FREG2;
switch (GET_OPCODE(op)) {
- case SLJIT_DADD:
+ case SLJIT_ADD_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
break;
- case SLJIT_DSUB:
+ case SLJIT_SUB_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
break;
- case SLJIT_DMUL:
+ case SLJIT_MUL_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
break;
- case SLJIT_DDIV:
+ case SLJIT_DIV_F64:
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
break;
}
@@ -1236,28 +1236,28 @@ static sljit_ins get_cc(sljit_s32 type)
switch (type) {
case SLJIT_EQUAL:
case SLJIT_MUL_NOT_OVERFLOW:
- case SLJIT_D_NOT_EQUAL: /* Unordered. */
+ case SLJIT_NOT_EQUAL_F64: /* Unordered. */
return DA(0x1);
case SLJIT_NOT_EQUAL:
case SLJIT_MUL_OVERFLOW:
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
return DA(0x9);
case SLJIT_LESS:
- case SLJIT_D_GREATER: /* Unordered. */
+ case SLJIT_GREATER_F64: /* Unordered. */
return DA(0x5);
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
return DA(0xd);
case SLJIT_GREATER:
- case SLJIT_D_GREATER_EQUAL: /* Unordered. */
+ case SLJIT_GREATER_EQUAL_F64: /* Unordered. */
return DA(0xc);
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
return DA(0x4);
case SLJIT_SIG_LESS:
@@ -1273,11 +1273,11 @@ static sljit_ins get_cc(sljit_s32 type)
return DA(0x2);
case SLJIT_OVERFLOW:
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
return DA(0x7);
case SLJIT_NOT_OVERFLOW:
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
return DA(0xf);
default:
@@ -1298,7 +1298,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
type &= 0xff;
- if (type < SLJIT_D_EQUAL) {
+ if (type < SLJIT_EQUAL_F64) {
jump->flags |= IS_COND;
if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET))
jump->flags |= IS_MOVABLE;
@@ -1395,7 +1395,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
}
type &= 0xff;
- if (type < SLJIT_D_EQUAL)
+ if (type < SLJIT_EQUAL_F64)
FAIL_IF(push_inst(compiler, BICC | get_cc(type) | 3, UNMOVABLE_INS));
else
FAIL_IF(push_inst(compiler, FBFCC | get_cc(type) | 3, UNMOVABLE_INS));
diff --git a/sljit/sljitNativeTILEGX_64.c b/sljit/sljitNativeTILEGX_64.c
index 79fe1d7..462a8b9 100644
--- a/sljit/sljitNativeTILEGX_64.c
+++ b/sljit/sljitNativeTILEGX_64.c
@@ -2180,10 +2180,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
case SLJIT_BREAKPOINT:
return PI(BPT);
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
SLJIT_ASSERT_STOP();
}
diff --git a/sljit/sljitNativeX86_common.c b/sljit/sljitNativeX86_common.c
index 9062fe0..54c3ac7 100644
--- a/sljit/sljitNativeX86_common.c
+++ b/sljit/sljitNativeX86_common.c
@@ -334,27 +334,27 @@ static sljit_u8 get_jump_code(sljit_s32 type)
{
switch (type) {
case SLJIT_EQUAL:
- case SLJIT_D_EQUAL:
+ case SLJIT_EQUAL_F64:
return 0x84 /* je */;
case SLJIT_NOT_EQUAL:
- case SLJIT_D_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
return 0x85 /* jne */;
case SLJIT_LESS:
- case SLJIT_D_LESS:
+ case SLJIT_LESS_F64:
return 0x82 /* jc */;
case SLJIT_GREATER_EQUAL:
- case SLJIT_D_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
return 0x83 /* jae */;
case SLJIT_GREATER:
- case SLJIT_D_GREATER:
+ case SLJIT_GREATER_F64:
return 0x87 /* jnbe */;
case SLJIT_LESS_EQUAL:
- case SLJIT_D_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
return 0x86 /* jbe */;
case SLJIT_SIG_LESS:
@@ -377,10 +377,10 @@ static sljit_u8 get_jump_code(sljit_s32 type)
case SLJIT_MUL_NOT_OVERFLOW:
return 0x81 /* jno */;
- case SLJIT_D_UNORDERED:
+ case SLJIT_UNORDERED_F64:
return 0x8a /* jp */;
- case SLJIT_D_ORDERED:
+ case SLJIT_ORDERED_F64:
return 0x8b /* jpo */;
}
return 0;
@@ -742,12 +742,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
INC_SIZE(1);
*inst = NOP;
break;
- case SLJIT_LUMUL:
- case SLJIT_LSMUL:
- case SLJIT_UDIVMOD:
- case SLJIT_SDIVMOD:
- case SLJIT_UDIVI:
- case SLJIT_SDIVI:
+ case SLJIT_LMUL_UW:
+ case SLJIT_LMUL_SW:
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIVMOD_SW:
+ case SLJIT_DIV_UW:
+ case SLJIT_DIV_SW:
compiler->flags_saved = 0;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
#ifdef _WIN64
@@ -765,10 +765,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#endif
compiler->mode32 = op & SLJIT_I32_OP;
#endif
- SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+ SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
op = GET_OPCODE(op);
- if ((op | 0x2) == SLJIT_UDIVI) {
+ if ((op | 0x2) == SLJIT_DIV_UW) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64)
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0);
inst = emit_x86_instruction(compiler, 1, SLJIT_R1, 0, SLJIT_R1, 0);
@@ -779,7 +779,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
*inst = XOR_r_rm;
}
- if ((op | 0x2) == SLJIT_SDIVI) {
+ if ((op | 0x2) == SLJIT_DIV_SW) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64)
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0);
#endif
@@ -810,10 +810,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
FAIL_IF(!inst);
INC_SIZE(2);
*inst++ = GROUP_F7;
- *inst = MOD_REG | ((op >= SLJIT_UDIVMOD) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]);
+ *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]);
#else
#ifdef _WIN64
- size = (!compiler->mode32 || op >= SLJIT_UDIVMOD) ? 3 : 2;
+ size = (!compiler->mode32 || op >= SLJIT_DIVMOD_UW) ? 3 : 2;
#else
size = (!compiler->mode32) ? 3 : 2;
#endif
@@ -822,11 +822,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
INC_SIZE(size);
#ifdef _WIN64
if (!compiler->mode32)
- *inst++ = REX_W | ((op >= SLJIT_UDIVMOD) ? REX_B : 0);
- else if (op >= SLJIT_UDIVMOD)
+ *inst++ = REX_W | ((op >= SLJIT_DIVMOD_UW) ? REX_B : 0);
+ else if (op >= SLJIT_DIVMOD_UW)
*inst++ = REX_B;
*inst++ = GROUP_F7;
- *inst = MOD_REG | ((op >= SLJIT_UDIVMOD) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]);
+ *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]);
#else
if (!compiler->mode32)
*inst++ = REX_W;
@@ -835,26 +835,26 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
#endif
#endif
switch (op) {
- case SLJIT_LUMUL:
+ case SLJIT_LMUL_UW:
*inst |= MUL;
break;
- case SLJIT_LSMUL:
+ case SLJIT_LMUL_SW:
*inst |= IMUL;
break;
- case SLJIT_UDIVMOD:
- case SLJIT_UDIVI:
+ case SLJIT_DIVMOD_UW:
+ case SLJIT_DIV_UW:
*inst |= DIV;
break;
- case SLJIT_SDIVMOD:
- case SLJIT_SDIVI:
+ case SLJIT_DIVMOD_SW:
+ case SLJIT_DIV_SW:
*inst |= IDIV;
break;
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && !defined(_WIN64)
- if (op <= SLJIT_SDIVMOD)
+ if (op <= SLJIT_DIVMOD_SW)
EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0);
#else
- if (op >= SLJIT_UDIVI)
+ if (op >= SLJIT_DIV_UW)
EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0);
#endif
break;
@@ -2322,7 +2322,7 @@ static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
return emit_sse2(compiler, MOVSD_xm_x, single, src, dst, dstw);
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -2330,11 +2330,11 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
sljit_u8 *inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONVW_FROMD)
+ if (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)
compiler->mode32 = 0;
#endif
- inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_SINGLE_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
+ inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
*inst = CVTTSD2SI_r_xm;
@@ -2344,7 +2344,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler
return SLJIT_SUCCESS;
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
@@ -2352,13 +2352,13 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
sljit_u8 *inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMW)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)
compiler->mode32 = 0;
#endif
if (src & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
srcw = (sljit_s32)srcw;
#endif
EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
@@ -2366,7 +2366,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
srcw = 0;
}
- inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_SINGLE_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
+ inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
*inst = CVTSI2SD_x_rm;
@@ -2375,7 +2375,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler
compiler->mode32 = 1;
#endif
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
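
The conversion helpers follow the same naming pattern: the new opcode names spell out both the result type and the source type (SLJIT_CONVW_FROMD becomes SLJIT_CONV_SW_FROM_F64, SLJIT_CONVD_FROMW becomes SLJIT_CONV_F64_FROM_SW). A sketch of how a caller might request them through sljit_emit_fop1, with arbitrary register choices and assuming sljitLir.h is included as in the sketch above:

/* Illustrative only: widen the integer in SLJIT_R0 to a double in SLJIT_FR0,
   then truncate that double back into SLJIT_R1. */
static void emit_conv_example(struct sljit_compiler *compiler)
{
	sljit_emit_fop1(compiler, SLJIT_CONV_F64_FROM_SW, SLJIT_FR0, 0, SLJIT_R0, 0);
	sljit_emit_fop1(compiler, SLJIT_CONV_SW_FROM_F64, SLJIT_R1, 0, SLJIT_FR0, 0);
}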
@@ -2385,10 +2385,10 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
{
compiler->flags_saved = 0;
if (!FAST_IS_REG(src1)) {
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
src1 = TMP_FREG;
}
- return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_SINGLE_OP), src1, src2, src2w);
+ return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -2404,56 +2404,56 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
CHECK_ERROR();
SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
- if (GET_OPCODE(op) == SLJIT_DMOV) {
+ if (GET_OPCODE(op) == SLJIT_MOV_F64) {
if (FAST_IS_REG(dst))
- return emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst, src, srcw);
+ return emit_sse2_load(compiler, op & SLJIT_F32_OP, dst, src, srcw);
if (FAST_IS_REG(src))
- return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, src);
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src, srcw));
- return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, src);
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src, srcw));
+ return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
}
- if (GET_OPCODE(op) == SLJIT_CONVD_FROMS) {
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) {
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG;
if (FAST_IS_REG(src)) {
/* We overwrite the high bits of source. From SLJIT point of view,
this is not an issue.
Note: In SSE3, we could also use MOVDDUP and MOVSLDUP. */
- FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_SINGLE_OP, src, src, 0));
+ FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_F32_OP, src, src, 0));
}
else {
- FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_SINGLE_OP), TMP_FREG, src, srcw));
+ FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_F32_OP), TMP_FREG, src, srcw));
src = TMP_FREG;
}
- FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_SINGLE_OP, dst_r, src, 0));
+ FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_F32_OP, dst_r, src, 0));
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
if (SLOW_IS_REG(dst)) {
dst_r = dst;
if (dst != src)
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
}
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
}
switch (GET_OPCODE(op)) {
- case SLJIT_DNEG:
- FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? sse2_buffer : sse2_buffer + 8)));
+ case SLJIT_NEG_F64:
+ FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer : sse2_buffer + 8)));
break;
- case SLJIT_DABS:
- FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? sse2_buffer + 4 : sse2_buffer + 12)));
+ case SLJIT_ABS_F64:
+ FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer + 4 : sse2_buffer + 12)));
break;
}
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
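
Throughout the single-operand floating point path the SLJIT_SINGLE_OP flag is replaced by SLJIT_F32_OP and the D-prefixed opcodes gain an _F64 suffix. From the caller's side, a hedged sketch (register numbers are arbitrary, and it assumes the 32 bit variants are formed by or-ing SLJIT_F32_OP into the _F64 opcode, as the flag handling above suggests):

/* Illustrative only: negate a double in FR0, then the single precision
   form of the same operation selected via SLJIT_F32_OP. */
static void emit_fop1_example(struct sljit_compiler *compiler)
{
	sljit_emit_fop1(compiler, SLJIT_NEG_F64, SLJIT_FR0, 0, SLJIT_FR0, 0);
	sljit_emit_fop1(compiler, SLJIT_NEG_F64 | SLJIT_F32_OP, SLJIT_FR1, 0, SLJIT_FR1, 0);
}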
@@ -2478,43 +2478,43 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
dst_r = dst;
if (dst == src1)
; /* Do nothing here. */
- else if (dst == src2 && (op == SLJIT_DADD || op == SLJIT_DMUL)) {
+ else if (dst == src2 && (op == SLJIT_ADD_F64 || op == SLJIT_MUL_F64)) {
/* Swap arguments. */
src2 = src1;
src2w = src1w;
}
else if (dst != src2)
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src1, src1w));
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
}
}
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
}
switch (GET_OPCODE(op)) {
- case SLJIT_DADD:
- FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+ case SLJIT_ADD_F64:
+ FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
break;
- case SLJIT_DSUB:
- FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+ case SLJIT_SUB_F64:
+ FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
break;
- case SLJIT_DMUL:
- FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+ case SLJIT_MUL_F64:
+ FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
break;
- case SLJIT_DDIV:
- FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+ case SLJIT_DIV_F64:
+ FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
break;
}
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
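
The two-operand arithmetic renames (SLJIT_DADD, SLJIT_DSUB, SLJIT_DMUL, SLJIT_DDIV to their _F64 counterparts) are equally mechanical. A minimal caller-side sketch with arbitrary registers, in the same illustrative spirit as the earlier snippets:

/* Illustrative only: FR0 = FR0 + FR1 in double precision; or SLJIT_F32_OP
   into the opcode for the single precision form. */
static void emit_fop2_example(struct sljit_compiler *compiler)
{
	sljit_emit_fop2(compiler, SLJIT_ADD_F64, SLJIT_FR0, 0,
		SLJIT_FR0, 0, SLJIT_FR1, 0);
}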
@@ -2966,7 +2966,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_x86_emit_cmov(struct sljit_compiler *co
#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
CHECK_ARGUMENT(sljit_x86_is_cmov_available());
CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP)));
- CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_D_ORDERED);
+ CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64);
CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg & ~SLJIT_I32_OP));
FUNCTION_CHECK_SRC(src, srcw);
#endif
@@ -2974,7 +2974,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_x86_emit_cmov(struct sljit_compiler *co
if (SLJIT_UNLIKELY(!!compiler->verbose)) {
fprintf(compiler->verbose, " x86_cmov%s %s%s, ",
!(dst_reg & SLJIT_I32_OP) ? "" : ".i",
- JUMP_PREFIX(type), jump_names[type & 0xff]);
+ jump_names[type & 0xff], JUMP_POSTFIX(type));
sljit_verbose_reg(compiler, dst_reg & ~SLJIT_I32_OP);
fprintf(compiler->verbose, ", ");
sljit_verbose_param(compiler, src, srcw);