summaryrefslogtreecommitdiff
path: root/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h')
-rw-r--r--src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h2296
1 files changed, 1887 insertions, 409 deletions
diff --git a/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h b/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
index 5a5553cc..3e6fcfba 100644
--- a/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
+++ b/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2011-2012 Oracle Corporation
+ * Copyright (C) 2011-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -66,7 +66,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -129,6 +129,8 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ if (pImpl != &g_iemAImpl_test)
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
break;
@@ -166,7 +168,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -188,7 +190,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -210,7 +212,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -270,7 +272,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG(uint32_t *, pEFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_EFLAGS(pEFlags);
@@ -327,6 +329,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
break;
@@ -361,7 +364,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG(uint32_t *, pEFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_EFLAGS(pEFlags);
@@ -378,12 +381,13 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG(uint32_t *, pEFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
break;
@@ -395,7 +399,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
IEM_MC_ARG(uint32_t *, pEFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_EFLAGS(pEFlags);
@@ -479,6 +483,8 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ if (pImpl != &g_iemAImpl_test)
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -538,7 +544,7 @@ FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm)
if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
{
- IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
@@ -576,8 +582,8 @@ FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Ldtr);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
- IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
+ IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
IEM_MC_FETCH_LDTR_U16(u16Ldtr);
IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Ldtr);
IEM_MC_ADVANCE_RIP();
@@ -595,7 +601,7 @@ FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm)
if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
{
- IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
@@ -633,8 +639,8 @@ FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Tr);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
- IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
+ IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
IEM_MC_FETCH_TR_U16(u16Tr);
IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tr);
IEM_MC_ADVANCE_RIP();
@@ -652,7 +658,7 @@ FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
{
- IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
IEM_MC_BEGIN(1, 0);
IEM_MC_ARG(uint16_t, u16Sel, 0);
IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
@@ -664,9 +670,9 @@ FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
IEM_MC_BEGIN(1, 1);
IEM_MC_ARG(uint16_t, u16Sel, 0);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
- IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
+ IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
IEM_MC_END();
@@ -695,9 +701,9 @@ FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
IEM_MC_BEGIN(1, 1);
IEM_MC_ARG(uint16_t, u16Sel, 0);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
IEM_MC_END();
@@ -706,12 +712,51 @@ FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
}
+/** Opcode 0x0f 0x00 /3. */
+FNIEMOP_DEF_2(iemOpCommonGrp6VerX, uint8_t, bRm, bool, fWrite)
+{
+ IEMOP_HLP_NO_REAL_OR_V86_MODE();
+
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint16_t, u16Sel, 0);
+ IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
+ IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
+ IEM_MC_END();
+ }
+ else
+ {
+ IEM_MC_BEGIN(2, 1);
+ IEM_MC_ARG(uint16_t, u16Sel, 0);
+ IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+ IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+}
+
+
/** Opcode 0x0f 0x00 /4. */
-FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp6_verr, uint8_t, bRm)
+{
+ IEMOP_MNEMONIC("verr Ew");
+ return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, false);
+}
/** Opcode 0x0f 0x00 /5. */
-FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp6_verw, uint8_t, bRm)
+{
+ IEMOP_MNEMONIC("verw Ew");
+ return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, true);
+}
/** Opcode 0x0f 0x00. */
@@ -740,11 +785,12 @@ FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
IEMOP_MNEMONIC("sgdt Ms");
IEMOP_HLP_64BIT_OP_SIZE();
IEM_MC_BEGIN(3, 1);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
+ IEM_MC_ARG(uint8_t, iEffSeg, 0);
IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_sgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
IEM_MC_END();
return VINF_SUCCESS;
@@ -789,11 +835,12 @@ FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
IEMOP_MNEMONIC("sidt Ms");
IEMOP_HLP_64BIT_OP_SIZE();
IEM_MC_BEGIN(3, 1);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
+ IEM_MC_ARG(uint8_t, iEffSeg, 0);
IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_sidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
IEM_MC_END();
return VINF_SUCCESS;
@@ -803,32 +850,33 @@ FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
- NOREF(pIemCpu);
- IEMOP_BITCH_ABOUT_STUB();
- return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+ IEMOP_MNEMONIC("monitor");
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. */
+ return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, pIemCpu->iEffSeg);
}
/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
- NOREF(pIemCpu);
- IEMOP_BITCH_ABOUT_STUB();
- return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+ IEMOP_MNEMONIC("mwait"); /** @todo Verify that mwait is allergic to lock prefixes. */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_mwait);
}
/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
- IEMOP_HLP_NO_LOCK_PREFIX();
-
+ IEMOP_MNEMONIC("lgdt");
IEMOP_HLP_64BIT_OP_SIZE();
IEM_MC_BEGIN(3, 1);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
+ IEM_MC_ARG(uint8_t, iEffSeg, 0);
IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
IEM_MC_END();
return VINF_SUCCESS;
@@ -854,16 +902,16 @@ FNIEMOP_DEF(iemOp_Grp7_xsetbv)
/** Opcode 0x0f 0x01 /3. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
- IEMOP_HLP_NO_LOCK_PREFIX();
-
IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
? IEMMODE_64BIT
: pIemCpu->enmEffOpSize;
IEM_MC_BEGIN(3, 1);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
+ IEM_MC_ARG(uint8_t, iEffSeg, 0);
IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
IEM_MC_END();
return VINF_SUCCESS;
@@ -897,6 +945,7 @@ FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
/** Opcode 0x0f 0x01 /4. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
+ IEMOP_MNEMONIC("smsw");
IEMOP_HLP_NO_LOCK_PREFIX();
if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
{
@@ -938,7 +987,7 @@ FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_CR0_U16(u16Tmp);
IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
IEM_MC_ADVANCE_RIP();
@@ -953,6 +1002,7 @@ FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
/* The operand size is effectively ignored, all is 16-bit and only the
lower 3-bits are used. */
+ IEMOP_MNEMONIC("lmsw");
IEMOP_HLP_NO_LOCK_PREFIX();
if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
{
@@ -967,7 +1017,7 @@ FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
IEM_MC_BEGIN(1, 1);
IEM_MC_ARG(uint16_t, u16Tmp, 0);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
IEM_MC_END();
@@ -979,10 +1029,11 @@ FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
+ IEMOP_MNEMONIC("invlpg");
IEMOP_HLP_NO_LOCK_PREFIX();
IEM_MC_BEGIN(1, 1);
IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst);
IEM_MC_END();
return VINF_SUCCESS;
@@ -992,9 +1043,10 @@ FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
- NOREF(pIemCpu);
- IEMOP_BITCH_ABOUT_STUB();
- return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+ IEMOP_MNEMONIC("swapgs");
+ IEMOP_HLP_NO_LOCK_PREFIX();
+ IEMOP_HLP_ONLY_64BIT();
+ return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_swapgs);
}
@@ -1084,13 +1136,132 @@ FNIEMOP_DEF(iemOp_Grp7)
}
}
+/** Opcode 0x0f 0x00 /3. */
+FNIEMOP_DEF_1(iemOpCommonLarLsl_Gv_Ew, bool, fIsLar)
+{
+ IEMOP_HLP_NO_REAL_OR_V86_MODE();
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+ switch (pIemCpu->enmEffOpSize)
+ {
+ case IEMMODE_16BIT:
+ {
+ IEM_MC_BEGIN(4, 0);
+ IEM_MC_ARG(uint16_t *, pu16Dst, 0);
+ IEM_MC_ARG(uint16_t, u16Sel, 1);
+ IEM_MC_ARG(uint32_t *, pEFlags, 2);
+ IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
+
+ IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_REF_EFLAGS(pEFlags);
+ IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);
+
+ IEM_MC_END();
+ return VINF_SUCCESS;
+ }
+
+ case IEMMODE_32BIT:
+ case IEMMODE_64BIT:
+ {
+ IEM_MC_BEGIN(4, 0);
+ IEM_MC_ARG(uint64_t *, pu64Dst, 0);
+ IEM_MC_ARG(uint16_t, u16Sel, 1);
+ IEM_MC_ARG(uint32_t *, pEFlags, 2);
+ IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
+
+ IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_REF_EFLAGS(pEFlags);
+ IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);
+
+ IEM_MC_END();
+ return VINF_SUCCESS;
+ }
+
+ IEM_NOT_REACHED_DEFAULT_CASE_RET();
+ }
+ }
+ else
+ {
+ switch (pIemCpu->enmEffOpSize)
+ {
+ case IEMMODE_16BIT:
+ {
+ IEM_MC_BEGIN(4, 1);
+ IEM_MC_ARG(uint16_t *, pu16Dst, 0);
+ IEM_MC_ARG(uint16_t, u16Sel, 1);
+ IEM_MC_ARG(uint32_t *, pEFlags, 2);
+ IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+
+ IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_REF_EFLAGS(pEFlags);
+ IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);
+
+ IEM_MC_END();
+ return VINF_SUCCESS;
+ }
+
+ case IEMMODE_32BIT:
+ case IEMMODE_64BIT:
+ {
+ IEM_MC_BEGIN(4, 1);
+ IEM_MC_ARG(uint64_t *, pu64Dst, 0);
+ IEM_MC_ARG(uint16_t, u16Sel, 1);
+ IEM_MC_ARG(uint32_t *, pEFlags, 2);
+ IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+
+ IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_REF_EFLAGS(pEFlags);
+ IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);
+
+ IEM_MC_END();
+ return VINF_SUCCESS;
+ }
+
+ IEM_NOT_REACHED_DEFAULT_CASE_RET();
+ }
+ }
+}
+
+
/** Opcode 0x0f 0x02. */
-FNIEMOP_STUB(iemOp_lar_Gv_Ew);
+FNIEMOP_DEF(iemOp_lar_Gv_Ew)
+{
+ IEMOP_MNEMONIC("lar Gv,Ew");
+ return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, true);
+}
+
+
/** Opcode 0x0f 0x03. */
-FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
+FNIEMOP_DEF(iemOp_lsl_Gv_Ew)
+{
+ IEMOP_MNEMONIC("lsl Gv,Ew");
+ return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, false);
+}
+
+
/** Opcode 0x0f 0x04. */
-FNIEMOP_STUB(iemOp_syscall);
+FNIEMOP_DEF(iemOp_syscall)
+{
+ IEMOP_MNEMONIC("syscall");
+ IEMOP_HLP_NO_LOCK_PREFIX();
+ return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_syscall);
+}
/** Opcode 0x0f 0x05. */
@@ -1103,7 +1274,14 @@ FNIEMOP_DEF(iemOp_clts)
/** Opcode 0x0f 0x06. */
-FNIEMOP_STUB(iemOp_sysret);
+FNIEMOP_DEF(iemOp_sysret)
+{
+ IEMOP_MNEMONIC("sysret");
+ IEMOP_HLP_NO_LOCK_PREFIX();
+ return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysret);
+}
+
+
/** Opcode 0x0f 0x08. */
FNIEMOP_STUB(iemOp_invd);
@@ -1158,7 +1336,7 @@ FNIEMOP_DEF(iemOp_nop_Ev_GrpP)
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
/* Currently a NOP. */
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -1291,17 +1469,17 @@ FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd)
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
-FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
+FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x13. */
-FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
+FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq); //NEXT
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
-FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
+FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x17. */
-FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);
+FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq); //NEXT
/** Opcode 0x0f 0x18. */
@@ -1326,7 +1504,7 @@ FNIEMOP_DEF(iemOp_prefetch_Grp16)
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
/* Currently a NOP. */
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -1352,7 +1530,7 @@ FNIEMOP_DEF(iemOp_nop_Ev)
{
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
/* Currently a NOP. */
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -1377,7 +1555,7 @@ FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
/* The lock prefix can be used to encode CR8 accesses on some CPUs. */
if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
- return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* #UD takes precedence over #GP(), see test. */
+ return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
iCrReg |= 8;
}
switch (iCrReg)
@@ -1423,7 +1601,7 @@ FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
/* The lock prefix can be used to encode CR8 accesses on some CPUs. */
if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
- return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* #UD takes precedence over #GP(), see test. */
+ return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
iCrReg |= 8;
}
switch (iCrReg)
@@ -1476,15 +1654,15 @@ FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
-FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
+FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey); //NEXT
/** Opcode 0x0f 0x2b. */
-FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
+FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd); //NEXT:XP
/** Opcode 0x0f 0x2c. */
-FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
+FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd); //NEXT
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
-FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
+FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
@@ -1591,7 +1769,7 @@ FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
IEM_MC_BEGIN(0, 2); \
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
IEM_MC_LOCAL(uint16_t, u16Tmp); \
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
a_Cnd { \
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
@@ -1604,7 +1782,7 @@ FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
IEM_MC_BEGIN(0, 2); \
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
IEM_MC_LOCAL(uint32_t, u32Tmp); \
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
a_Cnd { \
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
@@ -1619,7 +1797,7 @@ FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
IEM_MC_BEGIN(0, 2); \
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
IEM_MC_LOCAL(uint64_t, u64Tmp); \
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
a_Cnd { \
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
@@ -1780,9 +1958,9 @@ FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
-FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
+FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x59. */
-FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
+FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);//NEXT
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
@@ -1795,12 +1973,140 @@ FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
+
+
+/**
+ * Common worker for SSE2 and MMX instructions on the forms:
+ * pxxxx xmm1, xmm2/mem128
+ * pxxxx mm1, mm2/mem32
+ *
+ * The 2nd operand is the first half of a register, which in the memory case
+ * means a 32-bit memory access for MMX and 128-bit aligned 64-bit or 128-bit
+ * memory accessed for SSE.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
+{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE */
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_ARG(uint64_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(2, 2);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_LOCAL(uint64_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
+
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ if (!pImpl->pfnU64)
+ return IEMOP_RAISE_INVALID_OPCODE();
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+ /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_ARG(uint32_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
+ IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(2, 2);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_LOCAL(uint32_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint32_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_FETCH_MEM_U32(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
+
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
+
/** Opcode 0x0f 0x60. */
-FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpcklbw");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw);
+}
+
+
/** Opcode 0x0f 0x61. */
-FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpcklwd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. */
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
+}
+
+
/** Opcode 0x0f 0x62. */
-FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpckldq");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
+}
+
+
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
@@ -1811,24 +2117,481 @@ FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
+
+
+/**
+ * Common worker for SSE2 and MMX instructions on the forms:
+ * pxxxx xmm1, xmm2/mem128
+ * pxxxx mm1, mm2/mem64
+ *
+ * The 2nd operand is the second half of a register, which in the memory case
+ * means a 64-bit memory access for MMX, and for SSE a 128-bit aligned access
+ * where it may read the full 128 bits or only the upper 64 bits.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
+{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE */
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_ARG(uint128_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(2, 2);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_LOCAL(uint128_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+            IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only read the high qword */
+
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ if (!pImpl->pfnU64)
+ return IEMOP_RAISE_INVALID_OPCODE();
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+ /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_ARG(uint64_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
+ IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(2, 2);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_LOCAL(uint64_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
+
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
+
/** Opcode 0x0f 0x68. */
-FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpckhbw");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
+}
+
+
/** Opcode 0x0f 0x69. */
-FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpckhwd");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
+}
+
+
/** Opcode 0x0f 0x6a. */
-FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpckhdq");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
+}
+
/** Opcode 0x0f 0x6b. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
+
+
/** Opcode 0x0f 0x6c. */
-FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpcklqdq");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
+}
+
+
/** Opcode 0x0f 0x6d. */
-FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("punpckhqdq");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
+}
+
+
/** Opcode 0x0f 0x6e. */
-FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
+FNIEMOP_DEF(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey)
+{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE */
+ IEMOP_MNEMONIC("movd/q Wd/q,Ed/q");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /* XMM, greg*/
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ {
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
+ }
+ else
+ {
+ IEM_MC_LOCAL(uint32_t, u32Tmp);
+ IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
+ }
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /* XMM, [mem] */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ {
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
+ }
+ else
+ {
+ IEM_MC_LOCAL(uint32_t, u32Tmp);
+ IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
+ }
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ IEMOP_MNEMONIC("movq/d Pd/q,Ed/q");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /* MMX, greg */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ else
+ IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /* MMX, [mem] */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ {
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
+ }
+ else
+ {
+ IEM_MC_LOCAL(uint32_t, u32Tmp);
+ IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp);
+ }
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
+
/** Opcode 0x0f 0x6f. */
-FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
-/** Opcode 0x0f 0x70. */
-FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
+FNIEMOP_DEF(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq)
+{
+ bool fAligned = false;
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
+ fAligned = true;
+ case IEM_OP_PRF_REPZ: /* SSE unaligned */
+ if (fAligned)
+ IEMOP_MNEMONIC("movdqa Vdq,Wdq");
+ else
+ IEMOP_MNEMONIC("movdqu Vdq,Wdq");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint128_t, u128Tmp);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_FETCH_XREG_U128(u128Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(uint128_t, u128Tmp);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ if (fAligned)
+ IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
+ else
+ IEM_MC_FETCH_MEM_U128(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ IEMOP_MNEMONIC("movq Pq,Qq");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+ /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_FETCH_MREG_U64(u64Tmp, bRm & X86_MODRM_RM_MASK);
+ IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
+
+/** Opcode 0x0f 0x70. The immediate here is evil! */
+FNIEMOP_DEF(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib)
+{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE */
+ case IEM_OP_PRF_REPNZ: /* SSE */
+ case IEM_OP_PRF_REPZ: /* SSE */
+ {
+ PFNIEMAIMPLMEDIAPSHUF pfnAImpl;
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP:
+ IEMOP_MNEMONIC("pshufd Vdq,Wdq,Ib");
+ pfnAImpl = iemAImpl_pshufd;
+ break;
+ case IEM_OP_PRF_REPNZ:
+ IEMOP_MNEMONIC("pshuflw Vdq,Wdq,Ib");
+ pfnAImpl = iemAImpl_pshuflw;
+ break;
+ case IEM_OP_PRF_REPZ:
+ IEMOP_MNEMONIC("pshufhw Vdq,Wdq,Ib");
+ pfnAImpl = iemAImpl_pshufhw;
+ break;
+ IEM_NOT_REACHED_DEFAULT_CASE_RET();
+ }
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+
+ IEM_MC_BEGIN(3, 0);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_ARG(uint128_t const *, pSrc, 1);
+ IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(3, 2);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_LOCAL(uint128_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
+ IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+
+ IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+ }
+
+ case 0: /* MMX Extension */
+ IEMOP_MNEMONIC("pshufw Pq,Qq,Ib");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+
+ IEM_MC_BEGIN(3, 0);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_ARG(uint64_t const *, pSrc, 1);
+ IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
+ IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(3, 2);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_LOCAL(uint64_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
+ IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
+
+ IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
/** Opcode 0x0f 0x71 11/2. */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Nq_Ib, uint8_t, bRm);
@@ -1947,7 +2710,7 @@ FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Nq_Ib, uint8_t, bRm);
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Udq_Ib, uint8_t, bRm);
/** Opcode 0x66 0x0f 0x73 11/3. */
-FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm);
+FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm); //NEXT
/** Opcode 0x0f 0x73 11/6. */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm);
@@ -1956,7 +2719,7 @@ FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm);
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Udq_Ib, uint8_t, bRm);
/** Opcode 0x66 0x0f 0x73 11/7. */
-FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm);
+FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm); //NEXT
/** Opcode 0x0f 0x73. */
@@ -2000,12 +2763,133 @@ FNIEMOP_DEF(iemOp_Grp14)
}
+/**
+ * Common worker for SSE2 and MMX instructions on the forms:
+ * pxxx mm1, mm2/mem64
+ * pxxx xmm1, xmm2/mem128
+ *
+ * Proper alignment of the 128-bit operand is enforced.
+ * Exceptions type 4. SSE2 and MMX cpuid checks.
+ */
+FNIEMOP_DEF_1(iemOpCommonMmxSse2_FullFull_To_Full, PCIEMOPMEDIAF2, pImpl)
+{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE */
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_ARG(uint128_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(2, 2);
+ IEM_MC_ARG(uint128_t *, pDst, 0);
+ IEM_MC_LOCAL(uint128_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
+
+ IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+ /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_ARG(uint64_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
+ IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(2, 2);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_LOCAL(uint64_t, uSrc);
+ IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
+
+ IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
+
/** Opcode 0x0f 0x74. */
-FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("pcmpeqb");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqb);
+}
+
+
/** Opcode 0x0f 0x75. */
-FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("pcmpeqw");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqw);
+}
+
+
/** Opcode 0x0f 0x76. */
-FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("pcmpeqd");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqd);
+}
+
+
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
@@ -2016,10 +2900,211 @@ FNIEMOP_UD_STUB(iemOp_vmwrite);
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
+
+
/** Opcode 0x0f 0x7e. */
-FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
+FNIEMOP_DEF(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq)
+{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE */
+ IEMOP_MNEMONIC("movd/q Ed/q,Wd/q");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /* greg, XMM */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ {
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
+ }
+ else
+ {
+ IEM_MC_LOCAL(uint32_t, u32Tmp);
+ IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
+ }
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /* [mem], XMM */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ {
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
+ }
+ else
+ {
+ IEM_MC_LOCAL(uint32_t, u32Tmp);
+ IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
+ }
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ IEMOP_MNEMONIC("movq/d Ed/q,Pd/q");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /* greg, MMX */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ {
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
+ }
+ else
+ {
+ IEM_MC_LOCAL(uint32_t, u32Tmp);
+ IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
+ }
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /* [mem], MMX */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+ {
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
+ }
+ else
+ {
+ IEM_MC_LOCAL(uint32_t, u32Tmp);
+ IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
+ }
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
+
/** Opcode 0x0f 0x7f. */
-FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);
+FNIEMOP_DEF(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq)
+{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ bool fAligned = false;
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
+ fAligned = true;
+ case IEM_OP_PRF_REPZ: /* SSE unaligned */
+ if (fAligned)
+ IEMOP_MNEMONIC("movdqa Wdq,Vdq");
+ else
+ IEMOP_MNEMONIC("movdqu Wdq,Vdq");
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint128_t, u128Tmp);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_STORE_XREG_U128((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u128Tmp);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(uint128_t, u128Tmp);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ if (fAligned)
+ IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);
+ else
+ IEM_MC_STORE_MEM_U128(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ IEMOP_MNEMONIC("movq Qq,Pq");
+
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register, register.
+ */
+ /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+ /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_STORE_MREG_U64(bRm & X86_MODRM_RM_MASK, u64Tmp);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * Register, memory.
+ */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(uint64_t, u64Tmp);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+ IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
/** Opcode 0x0f 0x80. */
@@ -2609,7 +3694,7 @@ FNIEMOP_DEF(iemOp_seto_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -2649,7 +3734,7 @@ FNIEMOP_DEF(iemOp_setno_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -2689,7 +3774,7 @@ FNIEMOP_DEF(iemOp_setc_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -2729,7 +3814,7 @@ FNIEMOP_DEF(iemOp_setnc_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -2769,7 +3854,7 @@ FNIEMOP_DEF(iemOp_sete_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -2809,7 +3894,7 @@ FNIEMOP_DEF(iemOp_setne_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -2849,7 +3934,7 @@ FNIEMOP_DEF(iemOp_setbe_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -2889,7 +3974,7 @@ FNIEMOP_DEF(iemOp_setnbe_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -2929,7 +4014,7 @@ FNIEMOP_DEF(iemOp_sets_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -2969,7 +4054,7 @@ FNIEMOP_DEF(iemOp_setns_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -3009,7 +4094,7 @@ FNIEMOP_DEF(iemOp_setp_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -3049,7 +4134,7 @@ FNIEMOP_DEF(iemOp_setnp_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -3089,7 +4174,7 @@ FNIEMOP_DEF(iemOp_setl_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -3129,7 +4214,7 @@ FNIEMOP_DEF(iemOp_setnl_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -3169,7 +4254,7 @@ FNIEMOP_DEF(iemOp_setle_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
} IEM_MC_ELSE() {
@@ -3209,7 +4294,7 @@ FNIEMOP_DEF(iemOp_setnle_Eb)
/* memory target */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
} IEM_MC_ELSE() {
@@ -3247,7 +4332,7 @@ FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
- IEM_MC_PUSH_U32(u32Value);
+ IEM_MC_PUSH_U32_SREG(u32Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
break;
@@ -3336,6 +4421,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -3383,7 +4469,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(int16_t, i16AddrAdj);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_ASSIGN(i16AddrAdj, u16Src);
IEM_MC_AND_ARG_U16(u16Src, 0x0f);
@@ -3412,7 +4498,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(int32_t, i32AddrAdj);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_ASSIGN(i32AddrAdj, u32Src);
IEM_MC_AND_ARG_U32(u32Src, 0x1f);
@@ -3441,7 +4527,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(int64_t, i64AddrAdj);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_ASSIGN(i64AddrAdj, u64Src);
IEM_MC_AND_ARG_U64(u64Src, 0x3f);
@@ -3520,6 +4606,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -3557,7 +4644,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
IEM_MC_ASSIGN(cShiftArg, cShift);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
@@ -3579,7 +4666,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
IEM_MC_ASSIGN(cShiftArg, cShift);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
@@ -3601,7 +4688,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
IEM_MC_ASSIGN(cShiftArg, cShift);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
@@ -3666,6 +4753,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -3704,7 +4792,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -3725,7 +4813,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -3746,7 +4834,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -3832,15 +4920,16 @@ FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm)
{
IEMOP_MNEMONIC("fxsave m512");
- IEMOP_HLP_NO_LOCK_PREFIX();
if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
- return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+ return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_BEGIN(3, 1);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
+ IEM_MC_ARG(uint8_t, iEffSeg, 0);
IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize);
IEM_MC_END();
return VINF_SUCCESS;
@@ -3851,15 +4940,16 @@ FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm)
FNIEMOP_DEF_1(iemOp_Grp15_fxrstor, uint8_t, bRm)
{
IEMOP_MNEMONIC("fxrstor m512");
- IEMOP_HLP_NO_LOCK_PREFIX();
if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
- return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+ return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_BEGIN(3, 1);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0);
+ IEM_MC_ARG(uint8_t, iEffSeg, 0);
IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize);
IEM_MC_END();
return VINF_SUCCESS;
@@ -3884,14 +4974,63 @@ FNIEMOP_UD_STUB_1(iemOp_Grp15_xsaveopt, uint8_t, bRm);
/** Opcode 0x0f 0xae mem/7. */
FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm);
+
/** Opcode 0x0f 0xae 11b/5. */
-FNIEMOP_STUB_1(iemOp_Grp15_lfence, uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp15_lfence, uint8_t, bRm)
+{
+ IEMOP_MNEMONIC("lfence");
+ IEMOP_HLP_NO_LOCK_PREFIX();
+ if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+ return IEMOP_RAISE_INVALID_OPCODE();
+
+ IEM_MC_BEGIN(0, 0);
+ if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+ IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
+ else
+ IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ return VINF_SUCCESS;
+}
+
/** Opcode 0x0f 0xae 11b/6. */
-FNIEMOP_STUB_1(iemOp_Grp15_mfence, uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp15_mfence, uint8_t, bRm)
+{
+ IEMOP_MNEMONIC("mfence");
+ IEMOP_HLP_NO_LOCK_PREFIX();
+ if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+ return IEMOP_RAISE_INVALID_OPCODE();
+
+ IEM_MC_BEGIN(0, 0);
+ if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+ IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
+ else
+ IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ return VINF_SUCCESS;
+}
+
/** Opcode 0x0f 0xae 11b/7. */
-FNIEMOP_STUB_1(iemOp_Grp15_sfence, uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp15_sfence, uint8_t, bRm)
+{
+ IEMOP_MNEMONIC("sfence");
+ IEMOP_HLP_NO_LOCK_PREFIX();
+ if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+ return IEMOP_RAISE_INVALID_OPCODE();
+
+ IEM_MC_BEGIN(0, 0);
+ if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+ IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
+ else
+ IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ return VINF_SUCCESS;
+}
+
/** Opcode 0xf3 0x0f 0xae 11b/0. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdfsbase, uint8_t, bRm);
@@ -4012,7 +5151,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Eb_Gb)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(uint8_t, u8Al);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING();
IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
@@ -4134,7 +5273,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(uint16_t, u16Ax);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING();
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
@@ -4162,7 +5301,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(uint32_t, u32Eax);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING();
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
@@ -4194,7 +5333,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(uint64_t, u64Rax);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING();
IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
IEM_MC_FETCH_GREG_U64(u64Rax, X86_GREG_xAX);
@@ -4227,14 +5366,9 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
}
-FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
+FNIEMOP_DEF_2(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg, uint8_t, bRm)
{
- uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
- IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
-
- /* The source cannot be a register. */
- if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
- return IEMOP_RAISE_INVALID_OPCODE();
+ Assert((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)); /* Caller checks this */
uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
switch (pIemCpu->enmEffOpSize)
@@ -4247,7 +5381,8 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
@@ -4262,7 +5397,8 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
@@ -4277,8 +5413,12 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
- IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */
+ IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
+ else
+ IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
IEM_MC_END();
@@ -4293,7 +5433,10 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
FNIEMOP_DEF(iemOp_lss_Gv_Mp)
{
IEMOP_MNEMONIC("lss Gv,Mp");
- return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ return IEMOP_RAISE_INVALID_OPCODE();
+ return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_SS, bRm);
}
@@ -4309,7 +5452,10 @@ FNIEMOP_DEF(iemOp_btr_Ev_Gv)
FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
{
IEMOP_MNEMONIC("lfs Gv,Mp");
- return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ return IEMOP_RAISE_INVALID_OPCODE();
+ return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_FS, bRm);
}
@@ -4317,7 +5463,10 @@ FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
{
IEMOP_MNEMONIC("lgs Gv,Mp");
- return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ return IEMOP_RAISE_INVALID_OPCODE();
+ return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_GS, bRm);
}
@@ -4377,7 +5526,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_ADVANCE_RIP();
@@ -4388,7 +5537,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_ADVANCE_RIP();
@@ -4399,7 +5548,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint64_t, u64Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_ADVANCE_RIP();
@@ -4457,7 +5606,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_ADVANCE_RIP();
@@ -4468,7 +5617,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint64_t, u64Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_ADVANCE_RIP();
@@ -4540,6 +5689,7 @@ FNIEMOP_DEF(iemOp_Grp8)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -4584,16 +5734,16 @@ FNIEMOP_DEF(iemOp_Grp8)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
IEM_MC_FETCH_EFLAGS(EFlags);
- IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+ IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
else
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
- IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
+ IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
@@ -4607,16 +5757,16 @@ FNIEMOP_DEF(iemOp_Grp8)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
IEM_MC_FETCH_EFLAGS(EFlags);
- IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+ IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
else
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
- IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
+ IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
@@ -4630,16 +5780,16 @@ FNIEMOP_DEF(iemOp_Grp8)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
IEM_MC_FETCH_EFLAGS(EFlags);
- IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+ IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
else
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
- IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
+ IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
@@ -4735,7 +5885,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_ADVANCE_RIP();
@@ -4746,7 +5896,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_ADVANCE_RIP();
@@ -4757,7 +5907,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint64_t, u64Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_ADVANCE_RIP();
@@ -4815,7 +5965,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_ADVANCE_RIP();
@@ -4826,7 +5976,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint64_t, u64Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_ADVANCE_RIP();
@@ -4875,7 +6025,7 @@ FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
IEM_MC_LOCAL(uint8_t, u8RegCopy);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
@@ -4937,6 +6087,8 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -4974,7 +6126,7 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
IEM_MC_LOCAL(uint16_t, u16RegCopy);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
@@ -4999,7 +6151,7 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
IEM_MC_LOCAL(uint32_t, u32RegCopy);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
@@ -5024,7 +6176,7 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
IEM_MC_LOCAL(uint64_t, u64RegCopy);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
@@ -5076,7 +6228,7 @@ FNIEMOP_DEF_1(iemOp_Grp9_cmpxchg8b_Mq, uint8_t, bRm)
IEM_MC_LOCAL(RTUINT64U, u64EbxEcx);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING();
IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -5217,7 +6369,10 @@ FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg)
FNIEMOP_DEF(iemOp_bswap_rAX_r8)
{
IEMOP_MNEMONIC("bswap rAX/r8");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexReg);
+ /* Note! Intel manuals states that R8-R15 can be accessed by using a REX.X
+ prefix. REX.B is the correct prefix it appears. For a parallel
+ case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexB);
}
@@ -5225,7 +6380,7 @@ FNIEMOP_DEF(iemOp_bswap_rAX_r8)
FNIEMOP_DEF(iemOp_bswap_rCX_r9)
{
IEMOP_MNEMONIC("bswap rCX/r9");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexReg);
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexB);
}
@@ -5233,7 +6388,7 @@ FNIEMOP_DEF(iemOp_bswap_rCX_r9)
FNIEMOP_DEF(iemOp_bswap_rDX_r10)
{
IEMOP_MNEMONIC("bswap rDX/r9");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexReg);
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexB);
}
@@ -5241,7 +6396,7 @@ FNIEMOP_DEF(iemOp_bswap_rDX_r10)
FNIEMOP_DEF(iemOp_bswap_rBX_r11)
{
IEMOP_MNEMONIC("bswap rBX/r9");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexReg);
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexB);
}
@@ -5249,7 +6404,7 @@ FNIEMOP_DEF(iemOp_bswap_rBX_r11)
FNIEMOP_DEF(iemOp_bswap_rSP_r12)
{
IEMOP_MNEMONIC("bswap rSP/r12");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexReg);
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexB);
}
@@ -5257,7 +6412,7 @@ FNIEMOP_DEF(iemOp_bswap_rSP_r12)
FNIEMOP_DEF(iemOp_bswap_rBP_r13)
{
IEMOP_MNEMONIC("bswap rBP/r13");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexReg);
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexB);
}
@@ -5265,7 +6420,7 @@ FNIEMOP_DEF(iemOp_bswap_rBP_r13)
FNIEMOP_DEF(iemOp_bswap_rSI_r14)
{
IEMOP_MNEMONIC("bswap rSI/r14");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexReg);
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexB);
}
@@ -5273,7 +6428,7 @@ FNIEMOP_DEF(iemOp_bswap_rSI_r14)
FNIEMOP_DEF(iemOp_bswap_rDI_r15)
{
IEMOP_MNEMONIC("bswap rDI/r15");
- return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexReg);
+ return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexB);
}
@@ -5292,8 +6447,57 @@ FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
+
+
/** Opcode 0x0f 0xd7. */
-FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
+FNIEMOP_DEF(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq)
+{
+ /* Docs says register only. */
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) /** @todo test that this is registers only. */
+ return IEMOP_RAISE_INVALID_OPCODE();
+
+ /* Note! Taking the lazy approch here wrt the high 32-bits of the GREG. */
+ /** @todo testcase: Check that the instruction implicitly clears the high
+ * bits in 64-bit mode. The REX.W is first necessary when VLMAX > 256
+ * and opcode modifications are made to work with the whole width (not
+ * just 128). */
+ switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
+ {
+ case IEM_OP_PRF_SIZE_OP: /* SSE */
+ IEMOP_MNEMONIC("pmovmskb Gd,Nq");
+ IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_SSE | DISOPTYPE_HARMLESS);
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_ARG(uint128_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+ IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+ IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pmovmskb_u128, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ return VINF_SUCCESS;
+
+ case 0: /* MMX */
+ IEMOP_MNEMONIC("pmovmskb Gd,Udq");
+ IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_MMX | DISOPTYPE_HARMLESS);
+ IEM_MC_BEGIN(2, 0);
+ IEM_MC_ARG(uint64_t *, pDst, 0);
+ IEM_MC_ARG(uint64_t const *, pSrc, 1);
+ IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
+ IEM_MC_REF_GREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
+ IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pmovmskb_u64, pDst, pSrc);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ return VINF_SUCCESS;
+
+ default:
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
+}
+
+
/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
@@ -5340,8 +6544,16 @@ FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
+
+
/** Opcode 0x0f 0xef. */
-FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq)
+{
+ IEMOP_MNEMONIC("pxor");
+ return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pxor);
+}
+
+
/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1. */
@@ -5359,7 +6571,7 @@ FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7. */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8. */
-FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
+FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq); //NEXT
/** Opcode 0x0f 0xf9. */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa. */
@@ -5966,6 +7178,7 @@ FNIEMOP_DEF(iemOp_and_eAX_Iz)
/** Opcode 0x26. */
FNIEMOP_DEF(iemOp_seg_ES)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es");
pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
pIemCpu->iEffSeg = X86_SREG_ES;
@@ -5975,7 +7188,14 @@ FNIEMOP_DEF(iemOp_seg_ES)
/** Opcode 0x27. */
-FNIEMOP_STUB(iemOp_daa);
+FNIEMOP_DEF(iemOp_daa)
+{
+ IEMOP_MNEMONIC("daa AL");
+ IEMOP_HLP_NO_64BIT();
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF);
+ return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_daa);
+}
/** Opcode 0x28. */
@@ -6029,6 +7249,7 @@ FNIEMOP_DEF(iemOp_sub_eAX_Iz)
/** Opcode 0x2e. */
FNIEMOP_DEF(iemOp_seg_CS)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs");
pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
pIemCpu->iEffSeg = X86_SREG_CS;
@@ -6038,7 +7259,14 @@ FNIEMOP_DEF(iemOp_seg_CS)
/** Opcode 0x2f. */
-FNIEMOP_STUB(iemOp_das);
+FNIEMOP_DEF(iemOp_das)
+{
+ IEMOP_MNEMONIC("das AL");
+ IEMOP_HLP_NO_64BIT();
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF);
+ return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_das);
+}
/** Opcode 0x30. */
@@ -6098,6 +7326,7 @@ FNIEMOP_DEF(iemOp_xor_eAX_Iz)
/** Opcode 0x36. */
FNIEMOP_DEF(iemOp_seg_SS)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss");
pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
pIemCpu->iEffSeg = X86_SREG_SS;
@@ -6163,6 +7392,7 @@ FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
/** Opcode 0x3e. */
FNIEMOP_DEF(iemOp_seg_DS)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds");
pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
pIemCpu->iEffSeg = X86_SREG_DS;
@@ -6200,6 +7430,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
IEM_MC_REF_GREG_U32(pu32Dst, iReg);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -6227,6 +7458,7 @@ FNIEMOP_DEF(iemOp_inc_eAX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX;
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
@@ -6246,6 +7478,7 @@ FNIEMOP_DEF(iemOp_inc_eCX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
pIemCpu->uRexB = 1 << 3;
@@ -6266,6 +7499,7 @@ FNIEMOP_DEF(iemOp_inc_eDX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
pIemCpu->uRexIndex = 1 << 3;
@@ -6287,6 +7521,7 @@ FNIEMOP_DEF(iemOp_inc_eBX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
pIemCpu->uRexB = 1 << 3;
pIemCpu->uRexIndex = 1 << 3;
@@ -6308,6 +7543,7 @@ FNIEMOP_DEF(iemOp_inc_eSP)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
pIemCpu->uRexReg = 1 << 3;
@@ -6328,6 +7564,7 @@ FNIEMOP_DEF(iemOp_inc_eBP)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
pIemCpu->uRexReg = 1 << 3;
pIemCpu->uRexB = 1 << 3;
@@ -6349,6 +7586,7 @@ FNIEMOP_DEF(iemOp_inc_eSI)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
pIemCpu->uRexReg = 1 << 3;
pIemCpu->uRexIndex = 1 << 3;
@@ -6370,6 +7608,7 @@ FNIEMOP_DEF(iemOp_inc_eDI)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
pIemCpu->uRexReg = 1 << 3;
pIemCpu->uRexB = 1 << 3;
@@ -6392,6 +7631,7 @@ FNIEMOP_DEF(iemOp_dec_eAX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
iemRecalEffOpSize(pIemCpu);
@@ -6412,6 +7652,7 @@ FNIEMOP_DEF(iemOp_dec_eCX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
pIemCpu->uRexB = 1 << 3;
iemRecalEffOpSize(pIemCpu);
@@ -6433,6 +7674,7 @@ FNIEMOP_DEF(iemOp_dec_eDX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
pIemCpu->uRexIndex = 1 << 3;
iemRecalEffOpSize(pIemCpu);
@@ -6454,6 +7696,7 @@ FNIEMOP_DEF(iemOp_dec_eBX)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
pIemCpu->uRexB = 1 << 3;
pIemCpu->uRexIndex = 1 << 3;
@@ -6476,6 +7719,7 @@ FNIEMOP_DEF(iemOp_dec_eSP)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
pIemCpu->uRexReg = 1 << 3;
iemRecalEffOpSize(pIemCpu);
@@ -6497,6 +7741,7 @@ FNIEMOP_DEF(iemOp_dec_eBP)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
pIemCpu->uRexReg = 1 << 3;
pIemCpu->uRexB = 1 << 3;
@@ -6519,6 +7764,7 @@ FNIEMOP_DEF(iemOp_dec_eSI)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
pIemCpu->uRexReg = 1 << 3;
pIemCpu->uRexIndex = 1 << 3;
@@ -6541,6 +7787,7 @@ FNIEMOP_DEF(iemOp_dec_eDI)
*/
if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
pIemCpu->uRexReg = 1 << 3;
pIemCpu->uRexB = 1 << 3;
@@ -6680,10 +7927,6 @@ FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
}
-/** @todo How does this code handle iReg==X86_GREG_xSP. How does a real CPU
- * handle it, for that matter (Intel pseudo code hints that the popped
- * value is incremented by the stack item size.) Test it, both encodings
- * and all three register sizes. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
@@ -6700,6 +7943,7 @@ FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
IEM_MC_LOCAL(uint32_t, *pu32Dst);
IEM_MC_REF_GREG_U32(pu32Dst, iReg);
IEM_MC_POP_U32(pu32Dst);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /** @todo testcase*/
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
break;
@@ -6754,7 +7998,48 @@ FNIEMOP_DEF(iemOp_pop_eBX)
FNIEMOP_DEF(iemOp_pop_eSP)
{
IEMOP_MNEMONIC("pop rSP");
- return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
+ if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+ {
+ if (pIemCpu->uRexB)
+ return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
+ pIemCpu->enmDefOpSize = IEMMODE_64BIT;
+ pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
+ }
+
+ IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP,
+ DISOPTYPE_HARMLESS | DISOPTYPE_DEFAULT_64_OP_SIZE | DISOPTYPE_REXB_EXTENDS_OPREG);
+ /** @todo add testcase for this instruction. */
+ switch (pIemCpu->enmEffOpSize)
+ {
+ case IEMMODE_16BIT:
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint16_t, u16Dst);
+ IEM_MC_POP_U16(&u16Dst); /** @todo not correct MC, fix later. */
+ IEM_MC_STORE_GREG_U16(X86_GREG_xSP, u16Dst);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ break;
+
+ case IEMMODE_32BIT:
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint32_t, u32Dst);
+ IEM_MC_POP_U32(&u32Dst);
+ IEM_MC_STORE_GREG_U32(X86_GREG_xSP, u32Dst);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ break;
+
+ case IEMMODE_64BIT:
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint64_t, u64Dst);
+ IEM_MC_POP_U64(&u64Dst);
+ IEM_MC_STORE_GREG_U64(X86_GREG_xSP, u64Dst);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ break;
+ }
+
+ return VINF_SUCCESS;
}
@@ -6808,13 +8093,105 @@ FNIEMOP_DEF(iemOp_popa)
/** Opcode 0x62. */
FNIEMOP_STUB(iemOp_bound_Gv_Ma);
-/** Opcode 0x63. */
-FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
+
+
+/** Opcode 0x63 - non-64-bit modes. */
+FNIEMOP_DEF(iemOp_arpl_Ew_Gw)
+{
+ IEMOP_MNEMONIC("arpl Ew,Gw");
+ IEMOP_HLP_NO_REAL_OR_V86_MODE();
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /* Register */
+ IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
+ IEM_MC_BEGIN(3, 0);
+ IEM_MC_ARG(uint16_t *, pu16Dst, 0);
+ IEM_MC_ARG(uint16_t, u16Src, 1);
+ IEM_MC_ARG(uint32_t *, pEFlags, 2);
+
+ IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK));
+ IEM_MC_REF_EFLAGS(pEFlags);
+ IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);
+
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /* Memory */
+ IEM_MC_BEGIN(3, 2);
+ IEM_MC_ARG(uint16_t *, pu16Dst, 0);
+ IEM_MC_ARG(uint16_t, u16Src, 1);
+ IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
+
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
+ IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
+ IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
+ IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+ IEM_MC_FETCH_EFLAGS(EFlags);
+ IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);
+
+ IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
+ IEM_MC_COMMIT_EFLAGS(EFlags);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+
+}
+
+
+/** Opcode 0x63.
+ * @note This is a weird one. It works like a regular move instruction if
+ * REX.W isn't set, at least according to AMD docs (rev 3.15, 2009-11).
+ * @todo This definitely needs a testcase to verify the odd cases. */
+FNIEMOP_DEF(iemOp_movsxd_Gv_Ev)
+{
+ Assert(pIemCpu->enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */
+
+ IEMOP_MNEMONIC("movsxd Gv,Ev");
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+
+ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ /*
+ * Register to register.
+ */
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_BEGIN(0, 1);
+ IEM_MC_LOCAL(uint64_t, u64Value);
+ IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+ IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ else
+ {
+ /*
+ * We're loading a register from memory.
+ */
+ IEM_MC_BEGIN(0, 2);
+ IEM_MC_LOCAL(uint64_t, u64Value);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
+ IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
+ IEM_MC_ADVANCE_RIP();
+ IEM_MC_END();
+ }
+ return VINF_SUCCESS;
+}
/** Opcode 0x64. */
FNIEMOP_DEF(iemOp_seg_FS)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg fs");
pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
pIemCpu->iEffSeg = X86_SREG_FS;
@@ -6826,6 +8203,7 @@ FNIEMOP_DEF(iemOp_seg_FS)
/** Opcode 0x65. */
FNIEMOP_DEF(iemOp_seg_GS)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg gs");
pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
pIemCpu->iEffSeg = X86_SREG_GS;
@@ -6837,6 +8215,7 @@ FNIEMOP_DEF(iemOp_seg_GS)
/** Opcode 0x66. */
FNIEMOP_DEF(iemOp_op_size)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("op size");
pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
iemRecalEffOpSize(pIemCpu);
@@ -6848,6 +8227,7 @@ FNIEMOP_DEF(iemOp_op_size)
/** Opcode 0x67. */
FNIEMOP_DEF(iemOp_addr_size)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("addr size");
pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
switch (pIemCpu->enmDefAddrMode)
{
@@ -6949,7 +8329,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
IEM_MC_LOCAL(uint16_t, u16Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
IEM_MC_ASSIGN(u16Src, u16Imm);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
@@ -6998,7 +8378,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
IEM_MC_LOCAL(uint32_t, u32Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
IEM_MC_ASSIGN(u32Src, u32Imm);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
@@ -7047,7 +8427,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
IEM_MC_LOCAL(uint64_t, u64Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
IEM_MC_ASSIGN(u64Src, u64Imm);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
@@ -7135,7 +8515,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
IEM_MC_LOCAL(uint16_t, u16Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint16_t u16Imm; IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm);
IEM_MC_ASSIGN(u16Src, u16Imm);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
@@ -7182,7 +8562,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
IEM_MC_LOCAL(uint32_t, u32Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint32_t u32Imm; IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm);
IEM_MC_ASSIGN(u32Src, u32Imm);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
@@ -7229,7 +8609,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
IEM_MC_LOCAL(uint64_t, u64Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm);
IEM_MC_ASSIGN(u64Src, u64Imm);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
@@ -7257,9 +8637,9 @@ FNIEMOP_DEF(iemOp_insb_Yb_DX)
IEMOP_MNEMONIC("rep ins Yb,DX");
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr32, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr64, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
}
@@ -7268,9 +8648,9 @@ FNIEMOP_DEF(iemOp_insb_Yb_DX)
IEMOP_MNEMONIC("ins Yb,DX");
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr16, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr32, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr64, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
}
@@ -7289,9 +8669,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX)
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr16, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr32, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr64, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7299,9 +8679,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX)
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr16, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr32, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr64, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7316,9 +8696,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX)
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr16, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr32, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr64, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7326,9 +8706,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX)
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr16, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr32, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr64, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7347,9 +8727,9 @@ FNIEMOP_DEF(iemOp_outsb_Yb_DX)
IEMOP_MNEMONIC("rep out DX,Yb");
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
}
@@ -7358,9 +8738,9 @@ FNIEMOP_DEF(iemOp_outsb_Yb_DX)
IEMOP_MNEMONIC("out DX,Yb");
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
}
@@ -7379,9 +8759,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX)
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7389,9 +8769,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX)
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7406,9 +8786,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX)
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7416,9 +8796,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX)
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
- case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
- case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
+ case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg, false);
+ case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg, false);
+ case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg, false);
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
break;
@@ -7771,7 +9151,7 @@ FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
@@ -7836,7 +9216,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
IEM_MC_ASSIGN(u16Src, u16Imm);
IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -7869,6 +9249,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -7890,7 +9271,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
IEM_MC_ASSIGN(u32Src, u32Imm);
IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -7944,7 +9325,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
IEM_MC_ASSIGN(u64Src, u64Imm);
IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -8016,6 +9397,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -8063,7 +9445,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -8088,7 +9470,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -8113,7 +9495,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -8191,7 +9573,7 @@ FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
IEM_MC_ARG(uint8_t *, pu8Reg, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
@@ -8278,7 +9660,7 @@ FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
IEM_MC_ARG(uint16_t *, pu16Reg, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
@@ -8294,12 +9676,13 @@ FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
IEM_MC_ARG(uint32_t *, pu32Reg, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -8310,7 +9693,7 @@ FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
IEM_MC_ARG(uint64_t *, pu64Reg, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
@@ -8355,7 +9738,7 @@ FNIEMOP_DEF(iemOp_mov_Eb_Gb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint8_t, u8Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
IEM_MC_ADVANCE_RIP();
@@ -8420,7 +9803,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Gv)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
IEM_MC_ADVANCE_RIP();
@@ -8431,7 +9814,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Gv)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
IEM_MC_ADVANCE_RIP();
@@ -8442,7 +9825,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Gv)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint64_t, u64Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
IEM_MC_ADVANCE_RIP();
@@ -8482,7 +9865,7 @@ FNIEMOP_DEF(iemOp_mov_Gb_Eb)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint8_t, u8Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
IEM_MC_ADVANCE_RIP();
@@ -8546,7 +9929,7 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_ADVANCE_RIP();
@@ -8557,7 +9940,7 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_ADVANCE_RIP();
@@ -8568,7 +9951,7 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint64_t, u64Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_ADVANCE_RIP();
@@ -8580,6 +9963,17 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev)
}
+/** Opcode 0x63. */
+FNIEMOP_DEF(iemOp_arpl_Ew_Gw_movsx_Gv_Ev)
+{
+ if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
+ return FNIEMOP_CALL(iemOp_arpl_Ew_Gw);
+ if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
+ return FNIEMOP_CALL(iemOp_mov_Gv_Ev);
+ return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev);
+}
+
+
/** Opcode 0x8c. */
FNIEMOP_DEF(iemOp_mov_Ev_Sw)
{
@@ -8644,7 +10038,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Sw)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
IEM_MC_ADVANCE_RIP();
@@ -8663,7 +10057,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M)
uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
- return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */
+ return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */
switch (pIemCpu->enmEffOpSize)
{
@@ -8671,7 +10065,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
IEM_MC_LOCAL(uint16_t, u16Cast);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
IEM_MC_ADVANCE_RIP();
@@ -8682,7 +10076,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
IEM_MC_LOCAL(uint32_t, u32Cast);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
IEM_MC_ADVANCE_RIP();
@@ -8692,7 +10086,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M)
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -8748,7 +10142,7 @@ FNIEMOP_DEF(iemOp_mov_Sw_Ev)
IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
IEM_MC_ARG(uint16_t, u16Value, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
IEM_MC_END();
@@ -8795,7 +10189,7 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
RTGCPTR GCPtrEff;
VBOXSTRICTRC rcStrict;
- rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
+ rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
pIemCpu->offOpcode = offOpcodeSaved;
@@ -8804,12 +10198,12 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
uint64_t const RspSaved = pCtx->rsp;
switch (pIemCpu->enmEffOpSize)
{
- case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
- case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
- case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
+ case IEMMODE_16BIT: iemRegAddToRsp(pIemCpu, pCtx, 2); break;
+ case IEMMODE_32BIT: iemRegAddToRsp(pIemCpu, pCtx, 4); break;
+ case IEMMODE_64BIT: iemRegAddToRsp(pIemCpu, pCtx, 8); break;
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
- rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
+ rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
Assert(rcStrict == VINF_SUCCESS);
pCtx->rsp = RspSaved;
@@ -8841,7 +10235,7 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
uint64_t u64Value;
rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
if (rcStrict == VINF_SUCCESS)
- rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
+ rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
break;
}
@@ -8850,7 +10244,7 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
if (rcStrict == VINF_SUCCESS)
{
pCtx->rsp = TmpRsp.u;
- iemRegUpdateRip(pIemCpu);
+ iemRegUpdateRipAndClearRF(pIemCpu);
}
return rcStrict;
@@ -10178,7 +11572,7 @@ FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
IEMOP_MNEMONIC("mov AL,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pIemCpu->uRexB);
}
@@ -10186,7 +11580,7 @@ FNIEMOP_DEF(iemOp_mov_AL_Ib)
FNIEMOP_DEF(iemOp_CL_Ib)
{
IEMOP_MNEMONIC("mov CL,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pIemCpu->uRexB);
}
@@ -10194,7 +11588,7 @@ FNIEMOP_DEF(iemOp_CL_Ib)
FNIEMOP_DEF(iemOp_DL_Ib)
{
IEMOP_MNEMONIC("mov DL,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pIemCpu->uRexB);
}
@@ -10202,7 +11596,7 @@ FNIEMOP_DEF(iemOp_DL_Ib)
FNIEMOP_DEF(iemOp_BL_Ib)
{
IEMOP_MNEMONIC("mov BL,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pIemCpu->uRexB);
}
@@ -10210,7 +11604,7 @@ FNIEMOP_DEF(iemOp_BL_Ib)
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
IEMOP_MNEMONIC("mov AH,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pIemCpu->uRexB);
}
@@ -10218,7 +11612,7 @@ FNIEMOP_DEF(iemOp_mov_AH_Ib)
FNIEMOP_DEF(iemOp_CH_Ib)
{
IEMOP_MNEMONIC("mov CH,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pIemCpu->uRexB);
}
@@ -10226,7 +11620,7 @@ FNIEMOP_DEF(iemOp_CH_Ib)
FNIEMOP_DEF(iemOp_DH_Ib)
{
IEMOP_MNEMONIC("mov DH,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pIemCpu->uRexB);
}
@@ -10234,7 +11628,7 @@ FNIEMOP_DEF(iemOp_DH_Ib)
FNIEMOP_DEF(iemOp_BH_Ib)
{
IEMOP_MNEMONIC("mov BH,Ib");
- return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI);
+ return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pIemCpu->uRexB);
}
@@ -10272,7 +11666,7 @@ FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
}
case IEMMODE_64BIT:
{
- uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
+ uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); /* 64-bit immediate! */
IEMOP_HLP_NO_LOCK_PREFIX();
IEM_MC_BEGIN(0, 1);
@@ -10292,7 +11686,7 @@ FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
FNIEMOP_DEF(iemOp_eAX_Iv)
{
IEMOP_MNEMONIC("mov rAX,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pIemCpu->uRexB);
}
@@ -10300,7 +11694,7 @@ FNIEMOP_DEF(iemOp_eAX_Iv)
FNIEMOP_DEF(iemOp_eCX_Iv)
{
IEMOP_MNEMONIC("mov rCX,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pIemCpu->uRexB);
}
@@ -10308,7 +11702,7 @@ FNIEMOP_DEF(iemOp_eCX_Iv)
FNIEMOP_DEF(iemOp_eDX_Iv)
{
IEMOP_MNEMONIC("mov rDX,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pIemCpu->uRexB);
}
@@ -10316,7 +11710,7 @@ FNIEMOP_DEF(iemOp_eDX_Iv)
FNIEMOP_DEF(iemOp_eBX_Iv)
{
IEMOP_MNEMONIC("mov rBX,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pIemCpu->uRexB);
}
@@ -10324,7 +11718,7 @@ FNIEMOP_DEF(iemOp_eBX_Iv)
FNIEMOP_DEF(iemOp_eSP_Iv)
{
IEMOP_MNEMONIC("mov rSP,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pIemCpu->uRexB);
}
@@ -10332,7 +11726,7 @@ FNIEMOP_DEF(iemOp_eSP_Iv)
FNIEMOP_DEF(iemOp_eBP_Iv)
{
IEMOP_MNEMONIC("mov rBP,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pIemCpu->uRexB);
}
@@ -10340,7 +11734,7 @@ FNIEMOP_DEF(iemOp_eBP_Iv)
FNIEMOP_DEF(iemOp_eSI_Iv)
{
IEMOP_MNEMONIC("mov rSI,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pIemCpu->uRexB);
}
@@ -10348,7 +11742,7 @@ FNIEMOP_DEF(iemOp_eSI_Iv)
FNIEMOP_DEF(iemOp_eDI_Iv)
{
IEMOP_MNEMONIC("mov rDI,IV");
- return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI);
+ return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pIemCpu->uRexB);
}
@@ -10366,7 +11760,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
- case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+ case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
@@ -10396,7 +11790,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
IEM_MC_ASSIGN(cShiftArg, cShift);
IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -10426,7 +11820,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
- case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+ case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
@@ -10458,6 +11852,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -10490,7 +11885,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
IEM_MC_ASSIGN(cShiftArg, cShift);
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -10510,7 +11905,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
IEM_MC_ASSIGN(cShiftArg, cShift);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -10530,7 +11925,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
IEM_MC_ASSIGN(cShiftArg, cShift);
IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -10573,16 +11968,43 @@ FNIEMOP_DEF(iemOp_retn)
/** Opcode 0xc4. */
FNIEMOP_DEF(iemOp_les_Gv_Mp)
{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
+ || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ IEMOP_MNEMONIC("2-byte-vex");
+ /* The LES instruction is invalid in 64-bit mode. In legacy and
+ compatibility mode it is invalid with MOD=3.
+ The use as a VEX prefix is made possible by assigning the inverted
+ REX.R to the top MOD bit, and the top bit in the inverted register
+ specifier to the bottom MOD bit, thereby effectively limiting 32-bit
+ to accessing registers 0..7 in this VEX form. */
+ /** @todo VEX: Just use new tables for it. */
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
IEMOP_MNEMONIC("les Gv,Mp");
- return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
+ return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
}
/** Opcode 0xc5. */
FNIEMOP_DEF(iemOp_lds_Gv_Mp)
{
+ uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+ if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
+ || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+ {
+ IEMOP_MNEMONIC("3-byte-vex");
+ /* The LDS instruction is invalid in 64-bit mode. In legacy and
+ compatibility mode it is invalid with MOD=3.
+ The use as a VEX prefix is made possible by assigning the inverted
+ REX.R and REX.X to the two MOD bits, since the REX bits are ignored
+ outside of 64-bit mode. */
+ /** @todo VEX: Just use new tables for it. */
+ return IEMOP_RAISE_INVALID_OPCODE();
+ }
IEMOP_MNEMONIC("lds Gv,Mp");
- return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
+ return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_DS, bRm);
}
@@ -10609,7 +12031,7 @@ FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
/* memory access. */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
IEM_MC_ADVANCE_RIP();
@@ -10651,7 +12073,7 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 0);
- uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
+ uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -10668,7 +12090,7 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
IEM_MC_ADVANCE_RIP();
@@ -10678,7 +12100,7 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
IEM_MC_ADVANCE_RIP();
@@ -10688,8 +12110,8 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
- uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
+ uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
@@ -10796,7 +12218,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_1)
case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
- case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+ case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
}
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
@@ -10825,7 +12247,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_1)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
@@ -10854,7 +12276,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1)
case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
- case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+ case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
}
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
@@ -10885,6 +12307,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1)
IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -10917,7 +12340,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
@@ -10935,7 +12358,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
@@ -10953,7 +12376,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
@@ -11014,7 +12437,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -11076,6 +12499,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -11109,7 +12533,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -11128,7 +12552,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -11147,7 +12571,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
@@ -11412,7 +12836,7 @@ FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32r, uint8_t, bRm, PFNIEMAIMPLFPUR32, pfnAImpl)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -11462,7 +12886,7 @@ FNIEMOP_DEF_1(iemOp_fcom_m32r, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -11496,7 +12920,7 @@ FNIEMOP_DEF_1(iemOp_fcomp_m32r, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -11601,7 +13025,7 @@ FNIEMOP_DEF_1(iemOp_fld_m32r, uint8_t, bRm)
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val, r32Val, 1);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -11633,7 +13057,7 @@ FNIEMOP_DEF_1(iemOp_fst_m32r, uint8_t, bRm)
IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -11669,7 +13093,7 @@ FNIEMOP_DEF_1(iemOp_fstp_m32r, uint8_t, bRm)
IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -11700,11 +13124,12 @@ FNIEMOP_DEF_1(iemOp_fldenv, uint8_t, bRm)
IEMOP_MNEMONIC("fldenv m14/28byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
+ IEM_MC_ARG(uint8_t, iEffSeg, 1);
IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_fldenv, enmEffOpSize, iEffSeg, GCPtrEffSrc);
IEM_MC_END();
return VINF_SUCCESS;
@@ -11718,7 +13143,7 @@ FNIEMOP_DEF_1(iemOp_fldcw, uint8_t, bRm)
IEM_MC_BEGIN(1, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
IEM_MC_ARG(uint16_t, u16Fsw, 0);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_FETCH_MEM_U16(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
@@ -11734,11 +13159,12 @@ FNIEMOP_DEF_1(iemOp_fnstenv, uint8_t, bRm)
IEMOP_MNEMONIC("fstenv m14/m28byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
+ IEM_MC_ARG(uint8_t, iEffSeg, 1);
IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_fnstenv, enmEffOpSize, iEffSeg, GCPtrEffDst);
IEM_MC_END();
return VINF_SUCCESS;
@@ -11752,7 +13178,7 @@ FNIEMOP_DEF_1(iemOp_fnstcw, uint8_t, bRm)
IEM_MC_BEGIN(2, 0);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(uint16_t, u16Fcw);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_FETCH_FCW(u16Fcw);
@@ -12532,7 +13958,7 @@ FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32i, uint8_t, bRm, PFNIEMAIMPLFPUI32, pfnAImpl)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -12582,7 +14008,7 @@ FNIEMOP_DEF_1(iemOp_ficom_m32i, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -12616,7 +14042,7 @@ FNIEMOP_DEF_1(iemOp_ficomp_m32i, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -12722,7 +14148,7 @@ FNIEMOP_DEF_1(iemOp_fild_m32i, uint8_t, bRm)
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val, i32Val, 1);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -12754,7 +14180,7 @@ FNIEMOP_DEF_1(iemOp_fisttp_m32i, uint8_t, bRm)
IEM_MC_ARG(int32_t *, pi32Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -12790,7 +14216,7 @@ FNIEMOP_DEF_1(iemOp_fist_m32i, uint8_t, bRm)
IEM_MC_ARG(int32_t *, pi32Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -12826,7 +14252,7 @@ FNIEMOP_DEF_1(iemOp_fistp_m32i, uint8_t, bRm)
IEM_MC_ARG(int32_t *, pi32Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -12863,7 +14289,7 @@ FNIEMOP_DEF_1(iemOp_fld_m80r, uint8_t, bRm)
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT80U, pr80Val, r80Val, 1);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -12895,7 +14321,7 @@ FNIEMOP_DEF_1(iemOp_fstp_m80r, uint8_t, bRm)
IEM_MC_ARG(PRTFLOAT80U, pr80Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -13276,7 +14702,7 @@ FNIEMOP_DEF_2(iemOpHlpFpu_ST0_m64r, uint8_t, bRm, PFNIEMAIMPLFPUR64, pfnImpl)
IEM_MC_ARG(PCRTFLOAT80U, pr80Factor1, 1);
IEM_MC_ARG_LOCAL_REF(PRTFLOAT64U, pr64Factor2, r64Factor2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -13325,7 +14751,7 @@ FNIEMOP_DEF_1(iemOp_fcom_m64r, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -13359,7 +14785,7 @@ FNIEMOP_DEF_1(iemOp_fcomp_m64r, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -13463,7 +14889,7 @@ FNIEMOP_DEF_1(iemOp_fld_m64r, uint8_t, bRm)
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val, r64Val, 1);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -13494,7 +14920,7 @@ FNIEMOP_DEF_1(iemOp_fisttp_m64i, uint8_t, bRm)
IEM_MC_ARG(int64_t *, pi64Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -13530,7 +14956,7 @@ FNIEMOP_DEF_1(iemOp_fst_m64r, uint8_t, bRm)
IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -13568,7 +14994,7 @@ FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm)
IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -13596,14 +15022,15 @@ FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm)
/** Opcode 0xdd !11/0. */
FNIEMOP_DEF_1(iemOp_frstor, uint8_t, bRm)
{
- IEMOP_MNEMONIC("fxrstor m94/108byte");
+ IEMOP_MNEMONIC("frstor m94/108byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
+ IEM_MC_ARG(uint8_t, iEffSeg, 1);
IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_frstor, enmEffOpSize, iEffSeg, GCPtrEffSrc);
IEM_MC_END();
return VINF_SUCCESS;
@@ -13616,11 +15043,12 @@ FNIEMOP_DEF_1(iemOp_fnsave, uint8_t, bRm)
IEMOP_MNEMONIC("fnsave m94/108byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
- IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1);
+ IEM_MC_ARG(uint8_t, iEffSeg, 1);
IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
+ IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
IEM_MC_CALL_CIMPL_3(iemCImpl_fnsave, enmEffOpSize, iEffSeg, GCPtrEffDst);
IEM_MC_END();
return VINF_SUCCESS;
@@ -13636,10 +15064,10 @@ FNIEMOP_DEF_1(iemOp_fnstsw, uint8_t, bRm)
IEM_MC_LOCAL(uint16_t, u16Tmp);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
IEM_MC_FETCH_FSW(u16Tmp);
IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
IEM_MC_ADVANCE_RIP();
@@ -13825,7 +15253,7 @@ FNIEMOP_DEF_2(iemOpHlpFpu_st0_m16i, uint8_t, bRm, PFNIEMAIMPLFPUI16, pfnAImpl)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -13875,7 +15303,7 @@ FNIEMOP_DEF_1(iemOp_ficom_m16i, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -13909,7 +15337,7 @@ FNIEMOP_DEF_1(iemOp_ficomp_m16i, uint8_t, bRm)
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
@@ -14073,7 +15501,7 @@ FNIEMOP_DEF_1(iemOp_fisttp_m16i, uint8_t, bRm)
IEM_MC_ARG(int16_t *, pi16Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -14109,7 +15537,7 @@ FNIEMOP_DEF_1(iemOp_fist_m16i, uint8_t, bRm)
IEM_MC_ARG(int16_t *, pi16Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -14145,7 +15573,7 @@ FNIEMOP_DEF_1(iemOp_fistp_m16i, uint8_t, bRm)
IEM_MC_ARG(int16_t *, pi16Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -14191,7 +15619,7 @@ FNIEMOP_DEF_1(iemOp_fistp_m64i, uint8_t, bRm)
IEM_MC_ARG(int64_t *, pi64Dst, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
@@ -14366,34 +15794,58 @@ FNIEMOP_DEF(iemOp_loop_Jb)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
- IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
- IEM_MC_IF_CX_IS_NZ() {
- IEM_MC_REL_JMP_S8(i8Imm);
- } IEM_MC_ELSE() {
+ if (-(int8_t)pIemCpu->offOpcode != i8Imm)
+ {
+ IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
+ IEM_MC_IF_CX_IS_NZ() {
+ IEM_MC_REL_JMP_S8(i8Imm);
+ } IEM_MC_ELSE() {
+ IEM_MC_ADVANCE_RIP();
+ } IEM_MC_ENDIF();
+ }
+ else
+ {
+ IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xCX, 0);
IEM_MC_ADVANCE_RIP();
- } IEM_MC_ENDIF();
+ }
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
- IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
- IEM_MC_IF_ECX_IS_NZ() {
- IEM_MC_REL_JMP_S8(i8Imm);
- } IEM_MC_ELSE() {
+ if (-(int8_t)pIemCpu->offOpcode != i8Imm)
+ {
+ IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
+ IEM_MC_IF_ECX_IS_NZ() {
+ IEM_MC_REL_JMP_S8(i8Imm);
+ } IEM_MC_ELSE() {
+ IEM_MC_ADVANCE_RIP();
+ } IEM_MC_ENDIF();
+ }
+ else
+ {
+ IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xCX, 0);
IEM_MC_ADVANCE_RIP();
- } IEM_MC_ENDIF();
+ }
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
- IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
- IEM_MC_IF_RCX_IS_NZ() {
- IEM_MC_REL_JMP_S8(i8Imm);
- } IEM_MC_ELSE() {
+ if (-(int8_t)pIemCpu->offOpcode != i8Imm)
+ {
+ IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
+ IEM_MC_IF_RCX_IS_NZ() {
+ IEM_MC_REL_JMP_S8(i8Imm);
+ } IEM_MC_ELSE() {
+ IEM_MC_ADVANCE_RIP();
+ } IEM_MC_ENDIF();
+ }
+ else
+ {
+ IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xCX, 0);
IEM_MC_ADVANCE_RIP();
- } IEM_MC_ENDIF();
+ }
IEM_MC_END();
return VINF_SUCCESS;
@@ -14620,6 +16072,7 @@ FNIEMOP_DEF(iemOp_out_DX_eAX)
/** Opcode 0xf0. */
FNIEMOP_DEF(iemOp_lock)
{
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("lock");
pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
@@ -14632,6 +16085,7 @@ FNIEMOP_DEF(iemOp_repne)
{
/* This overrides any previous REPE prefix. */
pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne");
pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
@@ -14644,6 +16098,7 @@ FNIEMOP_DEF(iemOp_repe)
{
/* This overrides any previous REPNE prefix. */
pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
+ IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repe");
pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
@@ -14700,7 +16155,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
@@ -14738,7 +16193,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
@@ -14758,7 +16213,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
@@ -14778,7 +16233,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
@@ -14830,7 +16285,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
IEM_MC_ASSIGN(u8Src, u8Imm);
IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -14883,6 +16338,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
+ /* No clearing the high dword here - test doesn't write back the result. */
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
return VINF_SUCCESS;
@@ -14919,7 +16375,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
IEM_MC_ASSIGN(u16Src, u16Imm);
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -14941,7 +16397,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
IEM_MC_ASSIGN(u32Src, u32Imm);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -14963,7 +16419,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
IEM_MC_ASSIGN(u64Src, u64Imm);
IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
@@ -14992,15 +16448,22 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
/* register access */
IEMOP_HLP_NO_LOCK_PREFIX();
- IEM_MC_BEGIN(3, 0);
+ IEM_MC_BEGIN(3, 1);
IEM_MC_ARG(uint16_t *, pu16AX, 0);
IEM_MC_ARG(uint8_t, u8Value, 1);
IEM_MC_ARG(uint32_t *, pEFlags, 2);
+ IEM_MC_LOCAL(int32_t, rc);
+
IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
IEM_MC_REF_EFLAGS(pEFlags);
- IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
- IEM_MC_ADVANCE_RIP();
+ IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
+ IEM_MC_IF_LOCAL_IS_Z(rc) {
+ IEM_MC_ADVANCE_RIP();
+ } IEM_MC_ELSE() {
+ IEM_MC_RAISE_DIVIDE_ERROR();
+ } IEM_MC_ENDIF();
+
IEM_MC_END();
}
else
@@ -15008,19 +16471,24 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
/* memory access. */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
- IEM_MC_BEGIN(3, 1);
+ IEM_MC_BEGIN(3, 2);
IEM_MC_ARG(uint16_t *, pu16AX, 0);
IEM_MC_ARG(uint8_t, u8Value, 1);
IEM_MC_ARG(uint32_t *, pEFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
+ IEM_MC_LOCAL(int32_t, rc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
IEM_MC_REF_EFLAGS(pEFlags);
- IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
+ IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
+ IEM_MC_IF_LOCAL_IS_Z(rc) {
+ IEM_MC_ADVANCE_RIP();
+ } IEM_MC_ELSE() {
+ IEM_MC_RAISE_DIVIDE_ERROR();
+ } IEM_MC_ENDIF();
- IEM_MC_ADVANCE_RIP();
IEM_MC_END();
}
return VINF_SUCCESS;
@@ -15079,6 +16547,8 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
IEM_MC_IF_LOCAL_IS_Z(rc) {
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
IEM_MC_ADVANCE_RIP();
} IEM_MC_ELSE() {
IEM_MC_RAISE_DIVIDE_ERROR();
@@ -15132,7 +16602,7 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(int32_t, rc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
@@ -15159,13 +16629,15 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(int32_t, rc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
IEM_MC_IF_LOCAL_IS_Z(rc) {
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
+ IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
IEM_MC_ADVANCE_RIP();
} IEM_MC_ELSE() {
IEM_MC_RAISE_DIVIDE_ERROR();
@@ -15186,7 +16658,7 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_LOCAL(int32_t, rc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
@@ -15420,7 +16892,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
IEM_MC_BEGIN(1, 1);
IEM_MC_ARG(uint16_t, u16Target, 0);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
IEM_MC_END()
@@ -15430,7 +16902,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
IEM_MC_BEGIN(1, 1);
IEM_MC_ARG(uint32_t, u32Target, 0);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
IEM_MC_END()
@@ -15440,7 +16912,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
IEM_MC_BEGIN(1, 1);
IEM_MC_ARG(uint64_t, u64Target, 0);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
IEM_MC_END()
@@ -15468,7 +16940,7 @@ FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImp
IEM_MC_ARG(uint16_t, offSeg, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
@@ -15476,13 +16948,34 @@ FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImp
IEM_MC_END();
return VINF_SUCCESS;
+ case IEMMODE_64BIT:
+ /** @todo testcase: AMD does not seem to believe in the case (see bs-cpu-xcpt-1)
+ * and will apparently ignore REX.W, at least for the jmp far qword [rsp]
+ * and call far qword [rsp] encodings. */
+ if (!IEM_IS_GUEST_CPU_AMD(pIemCpu))
+ {
+ IEM_MC_BEGIN(3, 1);
+ IEM_MC_ARG(uint16_t, u16Sel, 0);
+ IEM_MC_ARG(uint64_t, offSeg, 1);
+ IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
+ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+ IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
+ IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
+ IEM_MC_END();
+ return VINF_SUCCESS;
+ }
+ /* AMD falls thru. */
+
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 1);
IEM_MC_ARG(uint16_t, u16Sel, 0);
IEM_MC_ARG(uint32_t, offSeg, 1);
IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
@@ -15490,20 +16983,6 @@ FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImp
IEM_MC_END();
return VINF_SUCCESS;
- case IEMMODE_64BIT:
- IEM_MC_BEGIN(3, 1);
- IEM_MC_ARG(uint16_t, u16Sel, 0);
- IEM_MC_ARG(uint64_t, offSeg, 1);
- IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
- IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
- IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
- IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
- IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
- IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
- IEM_MC_END();
- return VINF_SUCCESS;
-
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
}
@@ -15564,14 +17043,14 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
}
else
{
- /* The new RIP is taken from a register. */
+ /* The new RIP is taken from a memory location. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Target);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_SET_RIP_U16(u16Target);
IEM_MC_END()
@@ -15581,7 +17060,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Target);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_SET_RIP_U32(u32Target);
IEM_MC_END()
@@ -15589,11 +17068,11 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
- IEM_MC_LOCAL(uint32_t, u32Target);
+ IEM_MC_LOCAL(uint64_t, u64Target);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
- IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
- IEM_MC_SET_RIP_U32(u32Target);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+ IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
+ IEM_MC_SET_RIP_U64(u64Target);
IEM_MC_END()
return VINF_SUCCESS;
@@ -15609,8 +17088,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
*/
FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
{
- IEMOP_MNEMONIC("jmp Ep");
- IEMOP_HLP_NO_64BIT();
+ IEMOP_MNEMONIC("jmpf Ep");
return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_FarJmp);
}
@@ -15636,7 +17114,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Src);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_PUSH_U16(u16Src);
IEM_MC_ADVANCE_RIP();
@@ -15647,7 +17125,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Src);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_PUSH_U32(u32Src);
IEM_MC_ADVANCE_RIP();
@@ -15658,7 +17136,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint64_t, u64Src);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
- IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
+ IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
IEM_MC_PUSH_U64(u64Src);
IEM_MC_ADVANCE_RIP();
@@ -15727,7 +17205,7 @@ const PFNIEMOP g_apfnOneByteMap[256] =
/* 0x54 */ iemOp_push_eSP, iemOp_push_eBP, iemOp_push_eSI, iemOp_push_eDI,
/* 0x58 */ iemOp_pop_eAX, iemOp_pop_eCX, iemOp_pop_eDX, iemOp_pop_eBX,
/* 0x5c */ iemOp_pop_eSP, iemOp_pop_eBP, iemOp_pop_eSI, iemOp_pop_eDI,
- /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw,
+ /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw_movsx_Gv_Ev,
/* 0x64 */ iemOp_seg_FS, iemOp_seg_GS, iemOp_op_size, iemOp_addr_size,
/* 0x68 */ iemOp_push_Iz, iemOp_imul_Gv_Ev_Iz, iemOp_push_Ib, iemOp_imul_Gv_Ev_Ib,
/* 0x6c */ iemOp_insb_Yb_DX, iemOp_inswd_Yv_DX, iemOp_outsb_Yb_DX, iemOp_outswd_Yv_DX,