-rw-r--r--  Changelog                  1
-rw-r--r--  Makefile.target            2
-rw-r--r--  linux-user/main.c          4
-rw-r--r--  target-i386/cpu.h         30
-rw-r--r--  target-i386/exec.h         9
-rw-r--r--  target-i386/helper.c      26
-rw-r--r--  target-i386/helper2.c     30
-rw-r--r--  target-i386/op.c          80
-rw-r--r--  target-i386/ops_mem.h     16
-rw-r--r--  target-i386/ops_sse.h   1370
-rw-r--r--  target-i386/translate.c  888
-rw-r--r--  vl.c                     117
12 files changed, 2402 insertions(+), 171 deletions(-)
diff --git a/Changelog b/Changelog
index d041fc36a2..539e3ec962 100644
--- a/Changelog
+++ b/Changelog
@@ -11,6 +11,7 @@ version 0.6.2:
- added generic 64 bit target support
- initial x86_64 target support
- initial APIC support
+ - MMX/SSE/SSE2/PNI support
version 0.6.1:
diff --git a/Makefile.target b/Makefile.target
index b07e4903e3..942a105ea6 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -392,7 +392,7 @@ helper.o: helper.c
$(CC) $(HELPER_CFLAGS) $(DEFINES) -c -o $@ $<
ifeq ($(TARGET_BASE_ARCH), i386)
-op.o: op.c opreg_template.h ops_template.h ops_template_mem.h ops_mem.h
+op.o: op.c opreg_template.h ops_template.h ops_template_mem.h ops_mem.h ops_sse.h
endif
ifeq ($(TARGET_ARCH), arm)
diff --git a/linux-user/main.c b/linux-user/main.c
index aa5923f844..f2b83b0c27 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -1052,8 +1052,8 @@ int main(int argc, char **argv)
cpu_x86_set_cpl(env, 3);
env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
- env->hflags |= HF_PE_MASK;
-
+ env->hflags |= HF_PE_MASK | HF_OSFXSR_MASK;
+
/* flags setup : we activate the IRQs by default as in user mode */
env->eflags |= IF_MASK;
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index e65fc2e2ce..e494d23f7b 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -135,6 +135,7 @@
#define HF_IOPL_SHIFT 12 /* must be same as eflags */
#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
+#define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
@@ -150,6 +151,7 @@
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
+#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define CR0_PE_MASK (1 << 0)
#define CR0_MP_MASK (1 << 1)
@@ -340,10 +342,12 @@ typedef struct SegmentCache {
} SegmentCache;
typedef union {
- uint8_t _b[16];
- uint16_t _w[8];
- uint32_t _l[4];
- uint64_t _q[2];
+ uint8_t _b[16];
+ uint16_t _w[8];
+ uint32_t _l[4];
+ uint64_t _q[2];
+ float _s[4];
+ double _d[2];
} XMMReg;
typedef union {
@@ -357,7 +361,9 @@ typedef union {
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
+#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
+#define XMM_D(n) _d[1 - (n)]
#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
@@ -366,12 +372,15 @@ typedef union {
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
+#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
+#define XMM_D(n) _d[n]
#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#endif
+#define MMX_Q(n) q
#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
@@ -404,7 +413,14 @@ typedef struct CPUX86State {
unsigned int fpus;
unsigned int fpuc;
uint8_t fptags[8]; /* 0 = valid, 1 = empty */
- CPU86_LDouble fpregs[8];
+ union {
+#ifdef USE_X86LDOUBLE
+ CPU86_LDouble d __attribute__((aligned(16)));
+#else
+ CPU86_LDouble d;
+#endif
+ MMXReg mmx;
+ } fpregs[8];
/* emulator internal variables */
CPU86_LDouble ft0;
@@ -421,9 +437,11 @@ typedef struct CPUX86State {
SegmentCache tr;
SegmentCache gdt; /* only base and limit are used */
SegmentCache idt; /* only base and limit are used */
-
+
+ uint32_t mxcsr;
XMMReg xmm_regs[CPU_NB_REGS];
XMMReg xmm_t0;
+ MMXReg mmx_t0;
/* sysenter registers */
uint32_t sysenter_cs;
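Note: the fpregs change above aliases each x87 stack slot with a 64-bit MMXReg, matching the architectural fact that MMX state lives in the low 64 bits of the FPU registers, and the new XMM_S/XMM_D macros add float/double views of the XMM bytes (index-reversed on big-endian hosts so element 0 is always the architecturally lowest element). A minimal illustration of how these accessors combine (sketch only, not part of the patch):

/* Illustration only: the same storage seen through the x87 and MMX
   views, plus typed access to an XMM register. */
void example_views(CPUX86State *env)
{
    env->fpregs[0].d = 1.0;                  /* x87 view of stack slot 0 */
    env->fpregs[0].mmx.MMX_L(0) = 0x1234;    /* MMX view of the same slot */

    env->xmm_regs[0].XMM_L(0) = 0xdeadbeef;  /* dword element 0 */
    env->xmm_regs[0].XMM_S(1) = 2.5f;        /* float element 1 */
    env->xmm_regs[0].XMM_D(1) = 3.5;         /* double element 1 */
}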
diff --git a/target-i386/exec.h b/target-i386/exec.h
index 330343164a..1923b95ee4 100644
--- a/target-i386/exec.h
+++ b/target-i386/exec.h
@@ -131,8 +131,8 @@ extern int loglevel;
/* float macros */
#define FT0 (env->ft0)
-#define ST0 (env->fpregs[env->fpstt])
-#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7])
+#define ST0 (env->fpregs[env->fpstt].d)
+#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
#define ST1 ST(1)
#ifdef USE_FP_CONVERT
@@ -459,7 +459,7 @@ static inline CPU86_LDouble helper_fldt(target_ulong ptr)
return temp.d;
}
-static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
+static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr)
{
CPU86_LDoubleU temp;
int e;
@@ -557,6 +557,9 @@ void helper_fxsave(target_ulong ptr, int data64);
void helper_fxrstor(target_ulong ptr, int data64);
void restore_native_fp_state(CPUState *env);
void save_native_fp_state(CPUState *env);
+float approx_rsqrt(float a);
+float approx_rcp(float a);
+int fpu_isnan(double a);
extern const uint8_t parity_table[256];
extern const uint8_t rclw_table[32];
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 2567657a7f..9907f8e0b0 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -2444,7 +2444,7 @@ void helper_fldt_ST0_A0(void)
{
int new_fpstt;
new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt] = helper_fldt(A0);
+ env->fpregs[new_fpstt].d = helper_fldt(A0);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
}
@@ -2804,9 +2804,10 @@ void helper_fstenv(target_ulong ptr, int data32)
if (env->fptags[i]) {
fptag |= 3;
} else {
- tmp.d = env->fpregs[i];
+ tmp.d = env->fpregs[i].d;
exp = EXPD(tmp);
mant = MANTD(tmp);
+ printf("mant=%llx exp=%x\n", mant, exp);
if (exp == 0 && mant == 0) {
/* zero */
fptag |= 1;
@@ -2930,7 +2931,7 @@ void helper_fxsave(target_ulong ptr, int data64)
if (env->cr[4] & CR4_OSFXSR_MASK) {
/* XXX: finish it */
- stl(ptr + 0x18, 0); /* mxcsr */
+ stl(ptr + 0x18, env->mxcsr); /* mxcsr */
stl(ptr + 0x1c, 0); /* mxcsr_mask */
nb_xmm_regs = 8 << data64;
addr = ptr + 0xa0;
@@ -2967,7 +2968,7 @@ void helper_fxrstor(target_ulong ptr, int data64)
if (env->cr[4] & CR4_OSFXSR_MASK) {
/* XXX: finish it, endianness */
- //ldl(ptr + 0x18);
+ env->mxcsr = ldl(ptr + 0x18);
//ldl(ptr + 0x1c);
nb_xmm_regs = 8 << data64;
addr = ptr + 0xa0;
@@ -3209,6 +3210,23 @@ void helper_idivq_EAX_T0(void)
#endif
+/* XXX: do it */
+int fpu_isnan(double a)
+{
+ return 0;
+}
+
+float approx_rsqrt(float a)
+{
+ return 1.0 / sqrt(a);
+}
+
+float approx_rcp(float a)
+{
+ return 1.0 / a;
+}
+
+
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
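Note: the mxcsr handling in helper_fxsave()/helper_fxrstor() follows the architectural FXSAVE image layout; for reference, the offsets touched above are (sketch with illustrative names, not part of the patch):

/* Offsets into the FXSAVE area as used by helper_fxsave() above. */
enum {
    FXSAVE_FCW        = 0x00,  /* x87 control word */
    FXSAVE_FSW        = 0x02,  /* x87 status word */
    FXSAVE_FTW        = 0x04,  /* abridged tag word */
    FXSAVE_MXCSR      = 0x18,  /* now saved/restored from env->mxcsr */
    FXSAVE_MXCSR_MASK = 0x1c,  /* still written as 0 (see XXX above) */
    FXSAVE_ST_REGS    = 0x20,  /* ST0..ST7, 16 bytes per register */
    FXSAVE_XMM_REGS   = 0xa0,  /* XMM registers, 16 bytes each */
};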
diff --git a/target-i386/helper2.c b/target-i386/helper2.c
index 34307928ae..2811cd707f 100644
--- a/target-i386/helper2.c
+++ b/target-i386/helper2.c
@@ -158,6 +158,8 @@ void cpu_reset(CPUX86State *env)
for(i = 0;i < 8; i++)
env->fptags[i] = 1;
env->fpuc = 0x37f;
+
+ env->mxcsr = 0x1f80;
}
void cpu_x86_close(CPUX86State *env)
@@ -376,15 +378,15 @@ void cpu_dump_state(CPUState *env, FILE *f,
}
if (flags & X86_DUMP_FPU) {
cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
- (double)env->fpregs[0],
- (double)env->fpregs[1],
- (double)env->fpregs[2],
- (double)env->fpregs[3]);
+ (double)env->fpregs[0].d,
+ (double)env->fpregs[1].d,
+ (double)env->fpregs[2].d,
+ (double)env->fpregs[3].d);
cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
- (double)env->fpregs[4],
- (double)env->fpregs[5],
- (double)env->fpregs[7],
- (double)env->fpregs[8]);
+ (double)env->fpregs[4].d,
+ (double)env->fpregs[5].d,
+ (double)env->fpregs[7].d,
+ (double)env->fpregs[8].d);
}
}
@@ -471,6 +473,14 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
(env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
tlb_flush(env, 1);
}
+ /* SSE handling */
+ if (!(env->cpuid_features & CPUID_SSE))
+ new_cr4 &= ~CR4_OSFXSR_MASK;
+ if (new_cr4 & CR4_OSFXSR_MASK)
+ env->hflags |= HF_OSFXSR_MASK;
+ else
+ env->hflags &= ~HF_OSFXSR_MASK;
+
env->cr[4] = new_cr4;
}
@@ -800,7 +810,7 @@ void restore_native_fp_state(CPUState *env)
fp->fptag = fptag;
j = env->fpstt;
for(i = 0;i < 8; i++) {
- memcpy(&fp->fpregs1[i * 10], &env->fpregs[j], 10);
+ memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
j = (j + 1) & 7;
}
asm volatile ("frstor %0" : "=m" (*fp));
@@ -824,7 +834,7 @@ void save_native_fp_state(CPUState *env)
}
j = env->fpstt;
for(i = 0;i < 8; i++) {
- memcpy(&env->fpregs[j], &fp->fpregs1[i * 10], 10);
+ memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
j = (j + 1) & 7;
}
/* we must restore the default rounding state */
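Note: cpu_x86_update_cr4() now mirrors CR4.OSFXSR into HF_OSFXSR_MASK, which is the bit gen_sse() checks before accepting SSE encodings, and cpu_reset() initialises mxcsr to its architectural reset value 0x1f80. A hedged sketch of the guest-visible effect (illustration only):

/* Illustration only: system code enables SSE by setting CR4.OSFXSR;
   the helper keeps the translator-visible hflags bit in sync. */
void example_enable_sse(CPUX86State *env)
{
    if (env->cpuid_features & CPUID_SSE)
        cpu_x86_update_cr4(env, env->cr[4] | CR4_OSFXSR_MASK);
    /* env->hflags now has HF_OSFXSR_MASK set */
}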
diff --git a/target-i386/op.c b/target-i386/op.c
index 9ce2a5a2b7..10098256d4 100644
--- a/target-i386/op.c
+++ b/target-i386/op.c
@@ -752,11 +752,6 @@ void OPPROTO op_movswl_T0_T0(void)
T0 = (int16_t)T0;
}
-void OPPROTO op_movslq_T0_T0(void)
-{
- T0 = (int32_t)T0;
-}
-
void OPPROTO op_movzwl_T0_T0(void)
{
T0 = (uint16_t)T0;
@@ -768,6 +763,11 @@ void OPPROTO op_movswl_EAX_AX(void)
}
#ifdef TARGET_X86_64
+void OPPROTO op_movslq_T0_T0(void)
+{
+ T0 = (int32_t)T0;
+}
+
void OPPROTO op_movslq_RAX_EAX(void)
{
EAX = (int32_t)EAX;
@@ -1695,9 +1695,9 @@ void OPPROTO op_flds_ST0_A0(void)
new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
FP_CONVERT.i32 = ldl(A0);
- env->fpregs[new_fpstt] = FP_CONVERT.f;
+ env->fpregs[new_fpstt].d = FP_CONVERT.f;
#else
- env->fpregs[new_fpstt] = ldfl(A0);
+ env->fpregs[new_fpstt].d = ldfl(A0);
#endif
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
@@ -1709,9 +1709,9 @@ void OPPROTO op_fldl_ST0_A0(void)
new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
FP_CONVERT.i64 = ldq(A0);
- env->fpregs[new_fpstt] = FP_CONVERT.d;
+ env->fpregs[new_fpstt].d = FP_CONVERT.d;
#else
- env->fpregs[new_fpstt] = ldfq(A0);
+ env->fpregs[new_fpstt].d = ldfq(A0);
#endif
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
@@ -1729,7 +1729,7 @@ void helper_fild_ST0_A0(void)
{
int new_fpstt;
new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw(A0);
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)ldsw(A0);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
}
@@ -1738,7 +1738,7 @@ void helper_fildl_ST0_A0(void)
{
int new_fpstt;
new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl(A0));
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)((int32_t)ldl(A0));
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
}
@@ -1747,7 +1747,7 @@ void helper_fildll_ST0_A0(void)
{
int new_fpstt;
new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq(A0));
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)((int64_t)ldq(A0));
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
}
@@ -1775,9 +1775,9 @@ void OPPROTO op_fild_ST0_A0(void)
new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
FP_CONVERT.i32 = ldsw(A0);
- env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32;
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i32;
#else
- env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw(A0);
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)ldsw(A0);
#endif
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
@@ -1789,9 +1789,9 @@ void OPPROTO op_fildl_ST0_A0(void)
new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
FP_CONVERT.i32 = (int32_t) ldl(A0);
- env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32;
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i32;
#else
- env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl(A0));
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)((int32_t)ldl(A0));
#endif
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
@@ -1803,9 +1803,9 @@ void OPPROTO op_fildll_ST0_A0(void)
new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
FP_CONVERT.i64 = (int64_t) ldq(A0);
- env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i64;
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i64;
#else
- env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq(A0));
+ env->fpregs[new_fpstt].d = (CPU86_LDouble)((int64_t)ldq(A0));
#endif
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
@@ -2322,6 +2322,29 @@ void OPPROTO op_movo(void)
memcpy16(d, s);
}
+void OPPROTO op_movq(void)
+{
+ uint64_t *d, *s;
+ d = (uint64_t *)((char *)env + PARAM1);
+ s = (uint64_t *)((char *)env + PARAM2);
+ *d = *s;
+}
+
+void OPPROTO op_movl(void)
+{
+ uint32_t *d, *s;
+ d = (uint32_t *)((char *)env + PARAM1);
+ s = (uint32_t *)((char *)env + PARAM2);
+ *d = *s;
+}
+
+void OPPROTO op_movq_env_0(void)
+{
+ uint64_t *d;
+ d = (uint64_t *)((char *)env + PARAM1);
+ *d = 0;
+}
+
void OPPROTO op_fxsave_A0(void)
{
helper_fxsave(A0, PARAM1);
@@ -2331,3 +2354,24 @@ void OPPROTO op_fxrstor_A0(void)
{
helper_fxrstor(A0, PARAM1);
}
+
+/* XXX: optimize by storing fptt and fptags in the static cpu state */
+void OPPROTO op_enter_mmx(void)
+{
+ env->fpstt = 0;
+ *(uint32_t *)(env->fptags) = 0;
+ *(uint32_t *)(env->fptags + 4) = 0;
+}
+
+void OPPROTO op_emms(void)
+{
+ /* set to empty state */
+ *(uint32_t *)(env->fptags) = 0x01010101;
+ *(uint32_t *)(env->fptags + 4) = 0x01010101;
+}
+
+#define SHIFT 0
+#include "ops_sse.h"
+
+#define SHIFT 1
+#include "ops_sse.h"
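Note: op_enter_mmx() and op_emms() treat the eight one-byte fptags entries as two 32-bit words: 0x00000000 marks every stack slot valid (the MMX convention) and 0x01010101 marks them all empty. An equivalent, more explicit form (sketch only, not part of the patch):

/* Byte-wise equivalent of the two micro-ops above. */
static void enter_mmx_explicit(CPUX86State *env)
{
    int i;
    env->fpstt = 0;
    for (i = 0; i < 8; i++)
        env->fptags[i] = 0;   /* 0 = valid */
}

static void emms_explicit(CPUX86State *env)
{
    int i;
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;   /* 1 = empty */
}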
diff --git a/target-i386/ops_mem.h b/target-i386/ops_mem.h
index 21c17008b9..2e6ccc424d 100644
--- a/target-i386/ops_mem.h
+++ b/target-i386/ops_mem.h
@@ -80,7 +80,21 @@ void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T1_A0)(void)
glue(stl, MEMSUFFIX)(A0, T1);
}
-/* SSE support */
+/* SSE/MMX support */
+void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _env_A0)(void)
+{
+ uint64_t *p;
+ p = (uint64_t *)((char *)env + PARAM1);
+ *p = glue(ldq, MEMSUFFIX)(A0);
+}
+
+void OPPROTO glue(glue(op_stq, MEMSUFFIX), _env_A0)(void)
+{
+ uint64_t *p;
+ p = (uint64_t *)((char *)env + PARAM1);
+ glue(stq, MEMSUFFIX)(A0, *p);
+}
+
void OPPROTO glue(glue(op_ldo, MEMSUFFIX), _env_A0)(void)
{
XMMReg *p;
diff --git a/target-i386/ops_sse.h b/target-i386/ops_sse.h
new file mode 100644
index 0000000000..39c90d0c29
--- /dev/null
+++ b/target-i386/ops_sse.h
@@ -0,0 +1,1370 @@
+/*
+ * MMX/SSE/SSE2/PNI support
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#if SHIFT == 0
+#define Reg MMXReg
+#define XMM_ONLY(x...)
+#define B(n) MMX_B(n)
+#define W(n) MMX_W(n)
+#define L(n) MMX_L(n)
+#define Q(n) q
+#define SUFFIX _mmx
+#else
+#define Reg XMMReg
+#define XMM_ONLY(x...) x
+#define B(n) XMM_B(n)
+#define W(n) XMM_W(n)
+#define L(n) XMM_L(n)
+#define Q(n) XMM_Q(n)
+#define SUFFIX _xmm
+#endif
+
+void OPPROTO glue(op_psrlw, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 15) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->W(0) >>= shift;
+ d->W(1) >>= shift;
+ d->W(2) >>= shift;
+ d->W(3) >>= shift;
+#if SHIFT == 1
+ d->W(4) >>= shift;
+ d->W(5) >>= shift;
+ d->W(6) >>= shift;
+ d->W(7) >>= shift;
+#endif
+ }
+}
+
+void OPPROTO glue(op_psraw, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 15) {
+ shift = 15;
+ } else {
+ shift = s->B(0);
+ }
+ d->W(0) = (int16_t)d->W(0) >> shift;
+ d->W(1) = (int16_t)d->W(1) >> shift;
+ d->W(2) = (int16_t)d->W(2) >> shift;
+ d->W(3) = (int16_t)d->W(3) >> shift;
+#if SHIFT == 1
+ d->W(4) = (int16_t)d->W(4) >> shift;
+ d->W(5) = (int16_t)d->W(5) >> shift;
+ d->W(6) = (int16_t)d->W(6) >> shift;
+ d->W(7) = (int16_t)d->W(7) >> shift;
+#endif
+}
+
+void OPPROTO glue(op_psllw, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 15) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->W(0) <<= shift;
+ d->W(1) <<= shift;
+ d->W(2) <<= shift;
+ d->W(3) <<= shift;
+#if SHIFT == 1
+ d->W(4) <<= shift;
+ d->W(5) <<= shift;
+ d->W(6) <<= shift;
+ d->W(7) <<= shift;
+#endif
+ }
+}
+
+void OPPROTO glue(op_psrld, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 31) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->L(0) >>= shift;
+ d->L(1) >>= shift;
+#if SHIFT == 1
+ d->L(2) >>= shift;
+ d->L(3) >>= shift;
+#endif
+ }
+}
+
+void OPPROTO glue(op_psrad, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 31) {
+ shift = 31;
+ } else {
+ shift = s->B(0);
+ }
+ d->L(0) = (int32_t)d->L(0) >> shift;
+ d->L(1) = (int32_t)d->L(1) >> shift;
+#if SHIFT == 1
+ d->L(2) = (int32_t)d->L(2) >> shift;
+ d->L(3) = (int32_t)d->L(3) >> shift;
+#endif
+}
+
+void OPPROTO glue(op_pslld, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 31) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->L(0) <<= shift;
+ d->L(1) <<= shift;
+#if SHIFT == 1
+ d->L(2) <<= shift;
+ d->L(3) <<= shift;
+#endif
+ }
+}
+
+void OPPROTO glue(op_psrlq, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 63) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->Q(0) >>= shift;
+#if SHIFT == 1
+ d->Q(1) >>= shift;
+#endif
+ }
+}
+
+void OPPROTO glue(op_psllq, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ if (s->Q(0) > 63) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->Q(0) <<= shift;
+#if SHIFT == 1
+ d->Q(1) <<= shift;
+#endif
+ }
+}
+
+#if SHIFT == 1
+void OPPROTO glue(op_psrldq, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift, i;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ shift = s->L(0);
+ if (shift > 16)
+ shift = 16;
+ for(i = 0; i < 16 - shift; i++)
+ d->B(i) = d->B(i + shift);
+ for(i = 16 - shift; i < 16; i++)
+ d->B(i) = 0;
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_pslldq, SUFFIX)(void)
+{
+ Reg *d, *s;
+ int shift, i;
+
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ shift = s->L(0);
+ if (shift > 16)
+ shift = 16;
+ for(i = 15; i >= shift; i--)
+ d->B(i) = d->B(i - shift);
+ for(i = 0; i < shift; i++)
+ d->B(i) = 0;
+ FORCE_RET();
+}
+#endif
+
+#define SSE_OP_B(name, F)\
+void OPPROTO glue(name, SUFFIX) (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->B(0) = F(d->B(0), s->B(0));\
+ d->B(1) = F(d->B(1), s->B(1));\
+ d->B(2) = F(d->B(2), s->B(2));\
+ d->B(3) = F(d->B(3), s->B(3));\
+ d->B(4) = F(d->B(4), s->B(4));\
+ d->B(5) = F(d->B(5), s->B(5));\
+ d->B(6) = F(d->B(6), s->B(6));\
+ d->B(7) = F(d->B(7), s->B(7));\
+ XMM_ONLY(\
+ d->B(8) = F(d->B(8), s->B(8));\
+ d->B(9) = F(d->B(9), s->B(9));\
+ d->B(10) = F(d->B(10), s->B(10));\
+ d->B(11) = F(d->B(11), s->B(11));\
+ d->B(12) = F(d->B(12), s->B(12));\
+ d->B(13) = F(d->B(13), s->B(13));\
+ d->B(14) = F(d->B(14), s->B(14));\
+ d->B(15) = F(d->B(15), s->B(15));\
+ )\
+}
+
+#define SSE_OP_W(name, F)\
+void OPPROTO glue(name, SUFFIX) (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->W(0) = F(d->W(0), s->W(0));\
+ d->W(1) = F(d->W(1), s->W(1));\
+ d->W(2) = F(d->W(2), s->W(2));\
+ d->W(3) = F(d->W(3), s->W(3));\
+ XMM_ONLY(\
+ d->W(4) = F(d->W(4), s->W(4));\
+ d->W(5) = F(d->W(5), s->W(5));\
+ d->W(6) = F(d->W(6), s->W(6));\
+ d->W(7) = F(d->W(7), s->W(7));\
+ )\
+}
+
+#define SSE_OP_L(name, F)\
+void OPPROTO glue(name, SUFFIX) (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->L(0) = F(d->L(0), s->L(0));\
+ d->L(1) = F(d->L(1), s->L(1));\
+ XMM_ONLY(\
+ d->L(2) = F(d->L(2), s->L(2));\
+ d->L(3) = F(d->L(3), s->L(3));\
+ )\
+}
+
+#define SSE_OP_Q(name, F)\
+void OPPROTO glue(name, SUFFIX) (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->Q(0) = F(d->Q(0), s->Q(0));\
+ XMM_ONLY(\
+ d->Q(1) = F(d->Q(1), s->Q(1));\
+ )\
+}
+
+#if SHIFT == 0
+static inline int satub(int x)
+{
+ if (x < 0)
+ return 0;
+ else if (x > 255)
+ return 255;
+ else
+ return x;
+}
+
+static inline int satuw(int x)
+{
+ if (x < 0)
+ return 0;
+ else if (x > 65535)
+ return 65535;
+ else
+ return x;
+}
+
+static inline int satsb(int x)
+{
+ if (x < -128)
+ return -128;
+ else if (x > 127)
+ return 127;
+ else
+ return x;
+}
+
+static inline int satsw(int x)
+{
+ if (x < -32768)
+ return -32768;
+ else if (x > 32767)
+ return 32767;
+ else
+ return x;
+}
+
+#define FADD(a, b) ((a) + (b))
+#define FADDUB(a, b) satub((a) + (b))
+#define FADDUW(a, b) satuw((a) + (b))
+#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
+#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))
+
+#define FSUB(a, b) ((a) - (b))
+#define FSUBUB(a, b) satub((a) - (b))
+#define FSUBUW(a, b) satuw((a) - (b))
+#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
+#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
+#define FMINUB(a, b) ((a) < (b)) ? (a) : (b)
+#define FMINSW(a, b) ((int16_t)(a) < (int16_t)(b)) ? (a) : (b)
+#define FMAXUB(a, b) ((a) > (b)) ? (a) : (b)
+#define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b)
+
+#define FAND(a, b) (a) & (b)
+#define FANDN(a, b) ((~(a)) & (b))
+#define FOR(a, b) (a) | (b)
+#define FXOR(a, b) (a) ^ (b)
+
+#define FCMPGTB(a, b) (int8_t)(a) > (int8_t)(b) ? -1 : 0
+#define FCMPGTW(a, b) (int16_t)(a) > (int16_t)(b) ? -1 : 0
+#define FCMPGTL(a, b) (int32_t)(a) > (int32_t)(b) ? -1 : 0
+#define FCMPEQ(a, b) (a) == (b) ? -1 : 0
+
+#define FMULLW(a, b) (a) * (b)
+#define FMULHUW(a, b) (a) * (b) >> 16
+#define FMULHW(a, b) (int16_t)(a) * (int16_t)(b) >> 16
+
+#define FAVG(a, b) ((a) + (b) + 1) >> 1
+#endif
+
+SSE_OP_B(op_paddb, FADD)
+SSE_OP_W(op_paddw, FADD)
+SSE_OP_L(op_paddl, FADD)
+SSE_OP_Q(op_paddq, FADD)
+
+SSE_OP_B(op_psubb, FSUB)
+SSE_OP_W(op_psubw, FSUB)
+SSE_OP_L(op_psubl, FSUB)
+SSE_OP_Q(op_psubq, FSUB)
+
+SSE_OP_B(op_paddusb, FADDUB)
+SSE_OP_B(op_paddsb, FADDSB)
+SSE_OP_B(op_psubusb, FSUBUB)
+SSE_OP_B(op_psubsb, FSUBSB)
+
+SSE_OP_W(op_paddusw, FADDUW)
+SSE_OP_W(op_paddsw, FADDSW)
+SSE_OP_W(op_psubusw, FSUBUW)
+SSE_OP_W(op_psubsw, FSUBSW)
+
+SSE_OP_B(op_pminub, FMINUB)
+SSE_OP_B(op_pmaxub, FMAXUB)
+
+SSE_OP_W(op_pminsw, FMINSW)
+SSE_OP_W(op_pmaxsw, FMAXSW)
+
+SSE_OP_Q(op_pand, FAND)
+SSE_OP_Q(op_pandn, FANDN)
+SSE_OP_Q(op_por, FOR)
+SSE_OP_Q(op_pxor, FXOR)
+
+SSE_OP_B(op_pcmpgtb, FCMPGTB)
+SSE_OP_W(op_pcmpgtw, FCMPGTW)
+SSE_OP_L(op_pcmpgtl, FCMPGTL)
+
+SSE_OP_B(op_pcmpeqb, FCMPEQ)
+SSE_OP_W(op_pcmpeqw, FCMPEQ)
+SSE_OP_L(op_pcmpeql, FCMPEQ)
+
+SSE_OP_W(op_pmullw, FMULLW)
+SSE_OP_W(op_pmulhuw, FMULHUW)
+SSE_OP_W(op_pmulhw, FMULHW)
+
+SSE_OP_B(op_pavgb, FAVG)
+SSE_OP_W(op_pavgw, FAVG)
+
+void OPPROTO glue(op_pmuludq, SUFFIX) (void)
+{
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0);
+#if SHIFT == 1
+ d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2);
+#endif
+}
+
+void OPPROTO glue(op_pmaddwd, SUFFIX) (void)
+{
+ int i;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ for(i = 0; i < (2 << SHIFT); i++) {
+ d->L(i) = (int16_t)s->W(2*i) * (int16_t)d->W(2*i) +
+ (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1);
+ }
+}
+
+#if SHIFT == 0
+static inline int abs1(int a)
+{
+ if (a < 0)
+ return -a;
+ else
+ return a;
+}
+#endif
+void OPPROTO glue(op_psadbw, SUFFIX) (void)
+{
+ unsigned int val;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ val = 0;
+ val += abs1(d->B(0) - s->B(0));
+ val += abs1(d->B(1) - s->B(1));
+ val += abs1(d->B(2) - s->B(2));
+ val += abs1(d->B(3) - s->B(3));
+ val += abs1(d->B(4) - s->B(4));
+ val += abs1(d->B(5) - s->B(5));
+ val += abs1(d->B(6) - s->B(6));
+ val += abs1(d->B(7) - s->B(7));
+ d->Q(0) = val;
+#if SHIFT == 1
+ val = 0;
+ val += abs1(d->B(8) - s->B(8));
+ val += abs1(d->B(9) - s->B(9));
+ val += abs1(d->B(10) - s->B(10));
+ val += abs1(d->B(11) - s->B(11));
+ val += abs1(d->B(12) - s->B(12));
+ val += abs1(d->B(13) - s->B(13));
+ val += abs1(d->B(14) - s->B(14));
+ val += abs1(d->B(15) - s->B(15));
+ d->Q(1) = val;
+#endif
+}
+
+void OPPROTO glue(op_maskmov, SUFFIX) (void)
+{
+ int i;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ for(i = 0; i < (8 << SHIFT); i++) {
+ if (s->B(i) & 0x80)
+ stb(A0, d->B(i));
+ }
+}
+
+void OPPROTO glue(op_movl_mm_T0, SUFFIX) (void)
+{
+ Reg *d;
+ d = (Reg *)((char *)env + PARAM1);
+ d->L(0) = T0;
+ d->L(1) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+}
+
+void OPPROTO glue(op_movl_T0_mm, SUFFIX) (void)
+{
+ Reg *s;
+ s = (Reg *)((char *)env + PARAM1);
+ T0 = s->L(0);
+}
+
+#if SHIFT == 0
+void OPPROTO glue(op_pshufw, SUFFIX) (void)
+{
+ Reg r, *d, *s;
+ int order;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ order = PARAM3;
+ r.W(0) = s->W(order & 3);
+ r.W(1) = s->W((order >> 2) & 3);
+ r.W(2) = s->W((order >> 4) & 3);
+ r.W(3) = s->W((order >> 6) & 3);
+ *d = r;
+}
+#else
+void OPPROTO op_shufpd(void)
+{
+ Reg r, *d, *s;
+ int order;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ order = PARAM3;
+ r.Q(0) = s->Q(order & 1);
+ r.Q(1) = s->Q((order >> 1) & 1);
+ *d = r;
+}
+
+void OPPROTO glue(op_pshufd, SUFFIX) (void)
+{
+ Reg r, *d, *s;
+ int order;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ order = PARAM3;
+ r.L(0) = s->L(order & 3);
+ r.L(1) = s->L((order >> 2) & 3);
+ r.L(2) = s->L((order >> 4) & 3);
+ r.L(3) = s->L((order >> 6) & 3);
+ *d = r;
+}
+
+void OPPROTO glue(op_pshuflw, SUFFIX) (void)
+{
+ Reg r, *d, *s;
+ int order;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ order = PARAM3;
+ r.W(0) = s->W(order & 3);
+ r.W(1) = s->W((order >> 2) & 3);
+ r.W(2) = s->W((order >> 4) & 3);
+ r.W(3) = s->W((order >> 6) & 3);
+ r.Q(1) = s->Q(1);
+ *d = r;
+}
+
+void OPPROTO glue(op_pshufhw, SUFFIX) (void)
+{
+ Reg r, *d, *s;
+ int order;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ order = PARAM3;
+ r.Q(0) = s->Q(0);
+ r.W(4) = s->W(4 + (order & 3));
+ r.W(5) = s->W(4 + ((order >> 2) & 3));
+ r.W(6) = s->W(4 + ((order >> 4) & 3));
+ r.W(7) = s->W(4 + ((order >> 6) & 3));
+ *d = r;
+}
+#endif
+
+#if SHIFT == 1
+/* FPU ops */
+/* XXX: not accurate */
+
+#define SSE_OP_S(name, F)\
+void OPPROTO op_ ## name ## ps (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_S(0) = F(d->XMM_S(0), s->XMM_S(0));\
+ d->XMM_S(1) = F(d->XMM_S(1), s->XMM_S(1));\
+ d->XMM_S(2) = F(d->XMM_S(2), s->XMM_S(2));\
+ d->XMM_S(3) = F(d->XMM_S(3), s->XMM_S(3));\
+}\
+\
+void OPPROTO op_ ## name ## ss (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_S(0) = F(d->XMM_S(0), s->XMM_S(0));\
+}\
+void OPPROTO op_ ## name ## pd (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_D(0) = F(d->XMM_D(0), s->XMM_D(0));\
+ d->XMM_D(1) = F(d->XMM_D(1), s->XMM_D(1));\
+}\
+\
+void OPPROTO op_ ## name ## sd (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_D(0) = F(d->XMM_D(0), s->XMM_D(0));\
+}
+
+#define FPU_ADD(a, b) (a) + (b)
+#define FPU_SUB(a, b) (a) - (b)
+#define FPU_MUL(a, b) (a) * (b)
+#define FPU_DIV(a, b) (a) / (b)
+#define FPU_MIN(a, b) (a) < (b) ? (a) : (b)
+#define FPU_MAX(a, b) (a) > (b) ? (a) : (b)
+#define FPU_SQRT(a, b) sqrt(b)
+
+SSE_OP_S(add, FPU_ADD)
+SSE_OP_S(sub, FPU_SUB)
+SSE_OP_S(mul, FPU_MUL)
+SSE_OP_S(div, FPU_DIV)
+SSE_OP_S(min, FPU_MIN)
+SSE_OP_S(max, FPU_MAX)
+SSE_OP_S(sqrt, FPU_SQRT)
+
+
+/* float to float conversions */
+void OPPROTO op_cvtps2pd(void)
+{
+ float s0, s1;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ s0 = s->XMM_S(0);
+ s1 = s->XMM_S(1);
+ d->XMM_D(0) = s0;
+ d->XMM_D(1) = s1;
+}
+
+void OPPROTO op_cvtpd2ps(void)
+{
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ d->XMM_S(0) = s->XMM_D(0);
+ d->XMM_S(1) = s->XMM_D(1);
+ d->Q(1) = 0;
+}
+
+void OPPROTO op_cvtss2sd(void)
+{
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ d->XMM_D(0) = s->XMM_S(0);
+}
+
+void OPPROTO op_cvtsd2ss(void)
+{
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+ d->XMM_S(0) = s->XMM_D(0);
+}
+
+/* integer to float */
+void OPPROTO op_cvtdq2ps(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_S(0) = (int32_t)s->XMM_L(0);
+ d->XMM_S(1) = (int32_t)s->XMM_L(1);
+ d->XMM_S(2) = (int32_t)s->XMM_L(2);
+ d->XMM_S(3) = (int32_t)s->XMM_L(3);
+}
+
+void OPPROTO op_cvtdq2pd(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ int32_t l0, l1;
+ l0 = (int32_t)s->XMM_L(0);
+ l1 = (int32_t)s->XMM_L(1);
+ d->XMM_D(0) = l0;
+ d->XMM_D(1) = l1;
+}
+
+void OPPROTO op_cvtpi2ps(void)
+{
+ XMMReg *d = (Reg *)((char *)env + PARAM1);
+ MMXReg *s = (MMXReg *)((char *)env + PARAM2);
+ d->XMM_S(0) = (int32_t)s->MMX_L(0);
+ d->XMM_S(1) = (int32_t)s->MMX_L(1);
+}
+
+void OPPROTO op_cvtpi2pd(void)
+{
+ XMMReg *d = (Reg *)((char *)env + PARAM1);
+ MMXReg *s = (MMXReg *)((char *)env + PARAM2);
+ d->XMM_D(0) = (int32_t)s->MMX_L(0);
+ d->XMM_D(1) = (int32_t)s->MMX_L(1);
+}
+
+void OPPROTO op_cvtsi2ss(void)
+{
+ XMMReg *d = (Reg *)((char *)env + PARAM1);
+ d->XMM_S(0) = (int32_t)T0;
+}
+
+void OPPROTO op_cvtsi2sd(void)
+{
+ XMMReg *d = (Reg *)((char *)env + PARAM1);
+ d->XMM_D(0) = (int32_t)T0;
+}
+
+#ifdef TARGET_X86_64
+void OPPROTO op_cvtsq2ss(void)
+{
+ XMMReg *d = (Reg *)((char *)env + PARAM1);
+ d->XMM_S(0) = (int64_t)T0;
+}
+
+void OPPROTO op_cvtsq2sd(void)
+{
+ XMMReg *d = (Reg *)((char *)env + PARAM1);
+ d->XMM_D(0) = (int64_t)T0;
+}
+#endif
+
+/* float to integer */
+void OPPROTO op_cvtps2dq(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_L(0) = lrint(s->XMM_S(0));
+ d->XMM_L(1) = lrint(s->XMM_S(1));
+ d->XMM_L(2) = lrint(s->XMM_S(2));
+ d->XMM_L(3) = lrint(s->XMM_S(3));
+}
+
+void OPPROTO op_cvtpd2dq(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_L(0) = lrint(s->XMM_D(0));
+ d->XMM_L(1) = lrint(s->XMM_D(1));
+ d->XMM_Q(1) = 0;
+}
+
+void OPPROTO op_cvtps2pi(void)
+{
+ MMXReg *d = (MMXReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->MMX_L(0) = lrint(s->XMM_S(0));
+ d->MMX_L(1) = lrint(s->XMM_S(1));
+}
+
+void OPPROTO op_cvtpd2pi(void)
+{
+ MMXReg *d = (MMXReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->MMX_L(0) = lrint(s->XMM_D(0));
+ d->MMX_L(1) = lrint(s->XMM_D(1));
+}
+
+void OPPROTO op_cvtss2si(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = (int32_t)lrint(s->XMM_S(0));
+}
+
+void OPPROTO op_cvtsd2si(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = (int32_t)lrint(s->XMM_D(0));
+}
+
+#ifdef TARGET_X86_64
+void OPPROTO op_cvtss2sq(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = llrint(s->XMM_S(0));
+}
+
+void OPPROTO op_cvtsd2sq(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = llrint(s->XMM_D(0));
+}
+#endif
+
+/* float to integer truncated */
+void OPPROTO op_cvttps2dq(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_L(0) = (int32_t)s->XMM_S(0);
+ d->XMM_L(1) = (int32_t)s->XMM_S(1);
+ d->XMM_L(2) = (int32_t)s->XMM_S(2);
+ d->XMM_L(3) = (int32_t)s->XMM_S(3);
+}
+
+void OPPROTO op_cvttpd2dq(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_L(0) = (int32_t)s->XMM_D(0);
+ d->XMM_L(1) = (int32_t)s->XMM_D(1);
+ d->XMM_Q(1) = 0;
+}
+
+void OPPROTO op_cvttps2pi(void)
+{
+ MMXReg *d = (MMXReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->MMX_L(0) = (int32_t)(s->XMM_S(0));
+ d->MMX_L(1) = (int32_t)(s->XMM_S(1));
+}
+
+void OPPROTO op_cvttpd2pi(void)
+{
+ MMXReg *d = (MMXReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->MMX_L(0) = (int32_t)(s->XMM_D(0));
+ d->MMX_L(1) = (int32_t)(s->XMM_D(1));
+}
+
+void OPPROTO op_cvttss2si(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = (int32_t)(s->XMM_S(0));
+}
+
+void OPPROTO op_cvttsd2si(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = (int32_t)(s->XMM_D(0));
+}
+
+#ifdef TARGET_X86_64
+void OPPROTO op_cvttss2sq(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = (int64_t)(s->XMM_S(0));
+}
+
+void OPPROTO op_cvttsd2sq(void)
+{
+ XMMReg *s = (XMMReg *)((char *)env + PARAM1);
+ T0 = (int64_t)(s->XMM_D(0));
+}
+#endif
+
+void OPPROTO op_rsqrtps(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_S(0) = approx_rsqrt(s->XMM_S(0));
+ d->XMM_S(1) = approx_rsqrt(s->XMM_S(1));
+ d->XMM_S(2) = approx_rsqrt(s->XMM_S(2));
+ d->XMM_S(3) = approx_rsqrt(s->XMM_S(3));
+}
+
+void OPPROTO op_rsqrtss(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_S(0) = approx_rsqrt(s->XMM_S(0));
+}
+
+void OPPROTO op_rcpps(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_S(0) = approx_rcp(s->XMM_S(0));
+ d->XMM_S(1) = approx_rcp(s->XMM_S(1));
+ d->XMM_S(2) = approx_rcp(s->XMM_S(2));
+ d->XMM_S(3) = approx_rcp(s->XMM_S(3));
+}
+
+void OPPROTO op_rcpss(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_S(0) = approx_rcp(s->XMM_S(0));
+}
+
+void OPPROTO op_haddps(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ XMMReg r;
+ r.XMM_S(0) = d->XMM_S(0) + d->XMM_S(1);
+ r.XMM_S(1) = d->XMM_S(2) + d->XMM_S(3);
+ r.XMM_S(2) = s->XMM_S(0) + s->XMM_S(1);
+ r.XMM_S(3) = s->XMM_S(2) + s->XMM_S(3);
+ *d = r;
+}
+
+void OPPROTO op_haddpd(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ XMMReg r;
+ r.XMM_D(0) = d->XMM_D(0) + d->XMM_D(1);
+ r.XMM_D(1) = s->XMM_D(0) + s->XMM_D(1);
+ *d = r;
+}
+
+void OPPROTO op_hsubps(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ XMMReg r;
+ r.XMM_S(0) = d->XMM_S(0) - d->XMM_S(1);
+ r.XMM_S(1) = d->XMM_S(2) - d->XMM_S(3);
+ r.XMM_S(2) = s->XMM_S(0) - s->XMM_S(1);
+ r.XMM_S(3) = s->XMM_S(2) - s->XMM_S(3);
+ *d = r;
+}
+
+void OPPROTO op_hsubpd(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ XMMReg r;
+ r.XMM_D(0) = d->XMM_D(0) - d->XMM_D(1);
+ r.XMM_D(1) = s->XMM_D(0) - s->XMM_D(1);
+ *d = r;
+}
+
+void OPPROTO op_addsubps(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_S(0) = d->XMM_S(0) - s->XMM_S(0);
+ d->XMM_S(1) = d->XMM_S(1) + s->XMM_S(1);
+ d->XMM_S(2) = d->XMM_S(2) - s->XMM_S(2);
+ d->XMM_S(3) = d->XMM_S(3) + s->XMM_S(3);
+}
+
+void OPPROTO op_addsubpd(void)
+{
+ XMMReg *d = (XMMReg *)((char *)env + PARAM1);
+ XMMReg *s = (XMMReg *)((char *)env + PARAM2);
+ d->XMM_D(0) = d->XMM_D(0) - s->XMM_D(0);
+ d->XMM_D(1) = d->XMM_D(1) + s->XMM_D(1);
+}
+
+/* XXX: unordered */
+#define SSE_OP_CMP(name, F)\
+void OPPROTO op_ ## name ## ps (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_L(0) = F(d->XMM_S(0), s->XMM_S(0));\
+ d->XMM_L(1) = F(d->XMM_S(1), s->XMM_S(1));\
+ d->XMM_L(2) = F(d->XMM_S(2), s->XMM_S(2));\
+ d->XMM_L(3) = F(d->XMM_S(3), s->XMM_S(3));\
+}\
+\
+void OPPROTO op_ ## name ## ss (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_L(0) = F(d->XMM_S(0), s->XMM_S(0));\
+}\
+void OPPROTO op_ ## name ## pd (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_Q(0) = F(d->XMM_D(0), s->XMM_D(0));\
+ d->XMM_Q(1) = F(d->XMM_D(1), s->XMM_D(1));\
+}\
+\
+void OPPROTO op_ ## name ## sd (void)\
+{\
+ Reg *d, *s;\
+ d = (Reg *)((char *)env + PARAM1);\
+ s = (Reg *)((char *)env + PARAM2);\
+ d->XMM_Q(0) = F(d->XMM_D(0), s->XMM_D(0));\
+}
+
+#define FPU_CMPEQ(a, b) (a) == (b) ? -1 : 0
+#define FPU_CMPLT(a, b) (a) < (b) ? -1 : 0
+#define FPU_CMPLE(a, b) (a) <= (b) ? -1 : 0
+#define FPU_CMPUNORD(a, b) (fpu_isnan(a) || fpu_isnan(b)) ? - 1 : 0
+#define FPU_CMPNEQ(a, b) (a) == (b) ? 0 : -1
+#define FPU_CMPNLT(a, b) (a) < (b) ? 0 : -1
+#define FPU_CMPNLE(a, b) (a) <= (b) ? 0 : -1
+#define FPU_CMPORD(a, b) (!fpu_isnan(a) && !fpu_isnan(b)) ? - 1 : 0
+
+SSE_OP_CMP(cmpeq, FPU_CMPEQ)
+SSE_OP_CMP(cmplt, FPU_CMPLT)
+SSE_OP_CMP(cmple, FPU_CMPLE)
+SSE_OP_CMP(cmpunord, FPU_CMPUNORD)
+SSE_OP_CMP(cmpneq, FPU_CMPNEQ)
+SSE_OP_CMP(cmpnlt, FPU_CMPNLT)
+SSE_OP_CMP(cmpnle, FPU_CMPNLE)
+SSE_OP_CMP(cmpord, FPU_CMPORD)
+
+void OPPROTO op_ucomiss(void)
+{
+ int eflags;
+ float s0, s1;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ s0 = d->XMM_S(0);
+ s1 = s->XMM_S(0);
+ if (s0 < s1)
+ eflags = CC_C;
+ else if (s0 == s1)
+ eflags = CC_Z;
+ else
+ eflags = 0;
+ CC_SRC = eflags;
+ FORCE_RET();
+}
+
+void OPPROTO op_comiss(void)
+{
+ int eflags;
+ float s0, s1;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ s0 = d->XMM_S(0);
+ s1 = s->XMM_S(0);
+ if (s0 < s1)
+ eflags = CC_C;
+ else if (s0 == s1)
+ eflags = CC_Z;
+ else
+ eflags = 0;
+ CC_SRC = eflags;
+ FORCE_RET();
+}
+
+void OPPROTO op_ucomisd(void)
+{
+ int eflags;
+ double d0, d1;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ d0 = d->XMM_D(0);
+ d1 = s->XMM_D(0);
+ if (d0 < d1)
+ eflags = CC_C;
+ else if (d0 == d1)
+ eflags = CC_Z;
+ else
+ eflags = 0;
+ CC_SRC = eflags;
+ FORCE_RET();
+}
+
+void OPPROTO op_comisd(void)
+{
+ int eflags;
+ double d0, d1;
+ Reg *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ d0 = d->XMM_D(0);
+ d1 = s->XMM_D(0);
+ if (d0 < d1)
+ eflags = CC_C;
+ else if (d0 == d1)
+ eflags = CC_Z;
+ else
+ eflags = 0;
+ CC_SRC = eflags;
+ FORCE_RET();
+}
+
+void OPPROTO op_movmskps(void)
+{
+ int b0, b1, b2, b3;
+ Reg *s;
+ s = (Reg *)((char *)env + PARAM1);
+ b0 = s->XMM_L(0) >> 31;
+ b1 = s->XMM_L(1) >> 31;
+ b2 = s->XMM_L(2) >> 31;
+ b3 = s->XMM_L(3) >> 31;
+ T0 = b0 | (b1 << 1) | (b2 << 2) | (b3 << 3);
+}
+
+void OPPROTO op_movmskpd(void)
+{
+ int b0, b1;
+ Reg *s;
+ s = (Reg *)((char *)env + PARAM1);
+ b0 = s->XMM_L(1) >> 31;
+ b1 = s->XMM_L(3) >> 31;
+ T0 = b0 | (b1 << 1);
+}
+
+#endif
+
+void OPPROTO glue(op_pmovmskb, SUFFIX)(void)
+{
+ Reg *s;
+ s = (Reg *)((char *)env + PARAM1);
+ T0 = 0;
+ T0 |= (s->XMM_B(0) >> 7);
+ T0 |= (s->XMM_B(1) >> 6) & 0x02;
+ T0 |= (s->XMM_B(2) >> 5) & 0x04;
+ T0 |= (s->XMM_B(3) >> 4) & 0x08;
+ T0 |= (s->XMM_B(4) >> 3) & 0x10;
+ T0 |= (s->XMM_B(5) >> 2) & 0x20;
+ T0 |= (s->XMM_B(6) >> 1) & 0x40;
+ T0 |= (s->XMM_B(7)) & 0x80;
+#if SHIFT == 1
+ T0 |= (s->XMM_B(8) << 1) & 0x0100;
+ T0 |= (s->XMM_B(9) << 2) & 0x0200;
+ T0 |= (s->XMM_B(10) << 3) & 0x0400;
+ T0 |= (s->XMM_B(11) << 4) & 0x0800;
+ T0 |= (s->XMM_B(12) << 5) & 0x1000;
+ T0 |= (s->XMM_B(13) << 6) & 0x2000;
+ T0 |= (s->XMM_B(14) << 7) & 0x4000;
+ T0 |= (s->XMM_B(15) << 8) & 0x8000;
+#endif
+}
+
+void OPPROTO glue(op_pinsrw, SUFFIX) (void)
+{
+ Reg *d = (Reg *)((char *)env + PARAM1);
+ int pos = PARAM2;
+
+ d->W(pos) = T0;
+}
+
+void OPPROTO glue(op_pextrw, SUFFIX) (void)
+{
+ Reg *s = (Reg *)((char *)env + PARAM1);
+ int pos = PARAM2;
+
+ T0 = s->W(pos);
+}
+
+void OPPROTO glue(op_packsswb, SUFFIX) (void)
+{
+ Reg r, *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ r.B(0) = satsb((int16_t)d->W(0));
+ r.B(1) = satsb((int16_t)d->W(1));
+ r.B(2) = satsb((int16_t)d->W(2));
+ r.B(3) = satsb((int16_t)d->W(3));
+#if SHIFT == 1
+ r.B(4) = satsb((int16_t)d->W(4));
+ r.B(5) = satsb((int16_t)d->W(5));
+ r.B(6) = satsb((int16_t)d->W(6));
+ r.B(7) = satsb((int16_t)d->W(7));
+#endif
+ r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0));
+ r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1));
+ r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2));
+ r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3));
+#if SHIFT == 1
+ r.B(12) = satsb((int16_t)s->W(4));
+ r.B(13) = satsb((int16_t)s->W(5));
+ r.B(14) = satsb((int16_t)s->W(6));
+ r.B(15) = satsb((int16_t)s->W(7));
+#endif
+ *d = r;
+}
+
+void OPPROTO glue(op_packuswb, SUFFIX) (void)
+{
+ Reg r, *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ r.B(0) = satub((int16_t)d->W(0));
+ r.B(1) = satub((int16_t)d->W(1));
+ r.B(2) = satub((int16_t)d->W(2));
+ r.B(3) = satub((int16_t)d->W(3));
+#if SHIFT == 1
+ r.B(4) = satub((int16_t)d->W(4));
+ r.B(5) = satub((int16_t)d->W(5));
+ r.B(6) = satub((int16_t)d->W(6));
+ r.B(7) = satub((int16_t)d->W(7));
+#endif
+ r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0));
+ r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1));
+ r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2));
+ r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3));
+#if SHIFT == 1
+ r.B(12) = satub((int16_t)s->W(4));
+ r.B(13) = satub((int16_t)s->W(5));
+ r.B(14) = satub((int16_t)s->W(6));
+ r.B(15) = satub((int16_t)s->W(7));
+#endif
+ *d = r;
+}
+
+void OPPROTO glue(op_packssdw, SUFFIX) (void)
+{
+ Reg r, *d, *s;
+ d = (Reg *)((char *)env + PARAM1);
+ s = (Reg *)((char *)env + PARAM2);
+
+ r.W(0) = satsw(d->L(0));
+ r.W(1) = satsw(d->L(1));
+#if SHIFT == 1
+ r.W(2) = satsw(d->L(2));
+ r.W(3) = satsw(d->L(3));
+#endif
+ r.W((2 << SHIFT) + 0) = satsw(s->L(0));
+ r.W((2 << SHIFT) + 1) = satsw(s->L(1));
+#if SHIFT == 1
+ r.W(6) = satsw(s->L(2));
+ r.W(7) = satsw(s->L(3));
+#endif
+ *d = r;
+}
+
+#define UNPCK_OP(base_name, base) \
+ \
+void OPPROTO glue(op_punpck ## base_name ## bw, SUFFIX) (void) \
+{ \
+ Reg r, *d, *s; \
+ d = (Reg *)((char *)env + PARAM1); \
+ s = (Reg *)((char *)env + PARAM2); \
+ \
+ r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
+ r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
+ r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
+ r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
+ r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
+ r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
+ r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
+ r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
+XMM_ONLY( \
+ r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
+ r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
+ r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
+ r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
+ r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
+ r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
+ r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
+ r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
+) \
+ *d = r; \
+} \
+ \
+void OPPROTO glue(op_punpck ## base_name ## wd, SUFFIX) (void) \
+{ \
+ Reg r, *d, *s; \
+ d = (Reg *)((char *)env + PARAM1); \
+ s = (Reg *)((char *)env + PARAM2); \
+ \
+ r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
+ r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
+ r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
+ r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
+XMM_ONLY( \
+ r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
+ r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
+ r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
+ r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
+) \
+ *d = r; \
+} \
+ \
+void OPPROTO glue(op_punpck ## base_name ## dq, SUFFIX) (void) \
+{ \
+ Reg r, *d, *s; \
+ d = (Reg *)((char *)env + PARAM1); \
+ s = (Reg *)((char *)env + PARAM2); \
+ \
+ r.L(0) = d->L((base << SHIFT) + 0); \
+ r.L(1) = s->L((base << SHIFT) + 0); \
+XMM_ONLY( \
+ r.L(2) = d->L((base << SHIFT) + 1); \
+ r.L(3) = s->L((base << SHIFT) + 1); \
+) \
+ *d = r; \
+} \
+ \
+XMM_ONLY( \
+void OPPROTO glue(op_punpck ## base_name ## qdq, SUFFIX) (void) \
+{ \
+ Reg r, *d, *s; \
+ d = (Reg *)((char *)env + PARAM1); \
+ s = (Reg *)((char *)env + PARAM2); \
+ \
+ r.Q(0) = d->Q(base); \
+ r.Q(1) = s->Q(base); \
+ *d = r; \
+} \
+)
+
+UNPCK_OP(l, 0)
+UNPCK_OP(h, 1)
+
+#undef SHIFT
+#undef XMM_ONLY
+#undef Reg
+#undef B
+#undef W
+#undef L
+#undef Q
+#undef SUFFIX
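Note: ops_sse.h is included twice from op.c, once with SHIFT defined to 0 (64-bit MMX operands, _mmx suffix) and once with SHIFT 1 (128-bit XMM operands, _xmm suffix); the per-element macros and XMM_ONLY() hide the width difference. As an illustration, SSE_OP_W(op_paddw, FADD) expands in the SHIFT == 0 pass to roughly (sketch, not part of the patch):

/* Approximate expansion for the MMX pass; in the XMM pass the
   XMM_ONLY() section adds words 4..7. */
void OPPROTO op_paddw_mmx(void)
{
    MMXReg *d, *s;
    d = (MMXReg *)((char *)env + PARAM1);
    s = (MMXReg *)((char *)env + PARAM2);
    d->MMX_W(0) = d->MMX_W(0) + s->MMX_W(0);
    d->MMX_W(1) = d->MMX_W(1) + s->MMX_W(1);
    d->MMX_W(2) = d->MMX_W(2) + s->MMX_W(2);
    d->MMX_W(3) = d->MMX_W(3) + s->MMX_W(3);
}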
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 686c184f15..adcdaed5c4 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -1606,6 +1606,23 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
*offset_ptr = disp;
}
+/* used for LEA and MOV AX, mem */
+static void gen_add_A0_ds_seg(DisasContext *s)
+{
+ int override, must_add_seg;
+ must_add_seg = s->addseg;
+ override = R_DS;
+ if (s->override >= 0) {
+ override = s->override;
+ must_add_seg = 1;
+ } else {
+ override = R_DS;
+ }
+ if (must_add_seg) {
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
+}
+
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg !=
OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
@@ -2193,6 +2210,22 @@ static void gen_movtl_T0_im(target_ulong val)
#endif
}
+static GenOpFunc1 *gen_ldq_env_A0[3] = {
+ gen_op_ldq_raw_env_A0,
+#ifndef CONFIG_USER_ONLY
+ gen_op_ldq_kernel_env_A0,
+ gen_op_ldq_user_env_A0,
+#endif
+};
+
+static GenOpFunc1 *gen_stq_env_A0[3] = {
+ gen_op_stq_raw_env_A0,
+#ifndef CONFIG_USER_ONLY
+ gen_op_stq_kernel_env_A0,
+ gen_op_stq_user_env_A0,
+#endif
+};
+
static GenOpFunc1 *gen_ldo_env_A0[3] = {
gen_op_ldo_raw_env_A0,
#ifndef CONFIG_USER_ONLY
@@ -2209,6 +2242,693 @@ static GenOpFunc1 *gen_sto_env_A0[3] = {
#endif
};
+#define SSE_SPECIAL ((GenOpFunc2 *)1)
+
+#define MMX_OP2(x) { gen_op_ ## x ## _mmx, gen_op_ ## x ## _xmm }
+#define SSE_FOP(x) { gen_op_ ## x ## ps, gen_op_ ## x ## pd, \
+ gen_op_ ## x ## ss, gen_op_ ## x ## sd, }
+
+static GenOpFunc2 *sse_op_table1[256][4] = {
+ /* pure SSE operations */
+ [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+ [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+ [0x12] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
+ [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
+ [0x14] = { gen_op_punpckldq_xmm, gen_op_punpcklqdq_xmm },
+ [0x15] = { gen_op_punpckhdq_xmm, gen_op_punpckhqdq_xmm },
+ [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
+ [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
+
+ [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
+ [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
+ [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
+ [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
+ [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
+ [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
+ [0x2e] = { gen_op_ucomiss, gen_op_ucomisd },
+ [0x2f] = { gen_op_comiss, gen_op_comisd },
+ [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
+ [0x51] = SSE_FOP(sqrt),
+ [0x52] = { gen_op_rsqrtps, NULL, gen_op_rsqrtss, NULL },
+ [0x53] = { gen_op_rcpps, NULL, gen_op_rcpss, NULL },
+ [0x54] = { gen_op_pand_xmm, gen_op_pand_xmm }, /* andps, andpd */
+ [0x55] = { gen_op_pandn_xmm, gen_op_pandn_xmm }, /* andnps, andnpd */
+ [0x56] = { gen_op_por_xmm, gen_op_por_xmm }, /* orps, orpd */
+ [0x57] = { gen_op_pxor_xmm, gen_op_pxor_xmm }, /* xorps, xorpd */
+ [0x58] = SSE_FOP(add),
+ [0x59] = SSE_FOP(mul),
+ [0x5a] = { gen_op_cvtps2pd, gen_op_cvtpd2ps,
+ gen_op_cvtss2sd, gen_op_cvtsd2ss },
+ [0x5b] = { gen_op_cvtdq2ps, gen_op_cvtps2dq, gen_op_cvttps2dq },
+ [0x5c] = SSE_FOP(sub),
+ [0x5d] = SSE_FOP(min),
+ [0x5e] = SSE_FOP(div),
+ [0x5f] = SSE_FOP(max),
+
+ [0xc2] = SSE_FOP(cmpeq),
+ [0xc6] = { (GenOpFunc2 *)gen_op_pshufd_xmm, (GenOpFunc2 *)gen_op_shufpd },
+
+ /* MMX ops and their SSE extensions */
+ [0x60] = MMX_OP2(punpcklbw),
+ [0x61] = MMX_OP2(punpcklwd),
+ [0x62] = MMX_OP2(punpckldq),
+ [0x63] = MMX_OP2(packsswb),
+ [0x64] = MMX_OP2(pcmpgtb),
+ [0x65] = MMX_OP2(pcmpgtw),
+ [0x66] = MMX_OP2(pcmpgtl),
+ [0x67] = MMX_OP2(packuswb),
+ [0x68] = MMX_OP2(punpckhbw),
+ [0x69] = MMX_OP2(punpckhwd),
+ [0x6a] = MMX_OP2(punpckhdq),
+ [0x6b] = MMX_OP2(packssdw),
+ [0x6c] = { NULL, gen_op_punpcklqdq_xmm },
+ [0x6d] = { NULL, gen_op_punpckhqdq_xmm },
+ [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
+ [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+ [0x70] = { (GenOpFunc2 *)gen_op_pshufw_mmx,
+ (GenOpFunc2 *)gen_op_pshufd_xmm,
+ (GenOpFunc2 *)gen_op_pshufhw_xmm,
+ (GenOpFunc2 *)gen_op_pshuflw_xmm },
+ [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
+ [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
+ [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
+ [0x74] = MMX_OP2(pcmpeqb),
+ [0x75] = MMX_OP2(pcmpeqw),
+ [0x76] = MMX_OP2(pcmpeql),
+ [0x77] = { SSE_SPECIAL }, /* emms */
+ [0x7c] = { NULL, gen_op_haddpd, NULL, gen_op_haddps },
+ [0x7d] = { NULL, gen_op_hsubpd, NULL, gen_op_hsubps },
+ [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
+ [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+ [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
+ [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
+ [0xd0] = { NULL, gen_op_addsubpd, NULL, gen_op_addsubps },
+ [0xd1] = MMX_OP2(psrlw),
+ [0xd2] = MMX_OP2(psrld),
+ [0xd3] = MMX_OP2(psrlq),
+ [0xd4] = MMX_OP2(paddq),
+ [0xd5] = MMX_OP2(pmullw),
+ [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+ [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
+ [0xd8] = MMX_OP2(psubusb),
+ [0xd9] = MMX_OP2(psubusw),
+ [0xda] = MMX_OP2(pminub),
+ [0xdb] = MMX_OP2(pand),
+ [0xdc] = MMX_OP2(paddusb),
+ [0xdd] = MMX_OP2(paddusw),
+ [0xde] = MMX_OP2(pmaxub),
+ [0xdf] = MMX_OP2(pandn),
+ [0xe0] = MMX_OP2(pavgb),
+ [0xe1] = MMX_OP2(psraw),
+ [0xe2] = MMX_OP2(psrad),
+ [0xe3] = MMX_OP2(pavgw),
+ [0xe4] = MMX_OP2(pmulhuw),
+ [0xe5] = MMX_OP2(pmulhw),
+ [0xe6] = { NULL, gen_op_cvttpd2dq, gen_op_cvtdq2pd, gen_op_cvtpd2dq },
+ [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
+ [0xe8] = MMX_OP2(psubsb),
+ [0xe9] = MMX_OP2(psubsw),
+ [0xea] = MMX_OP2(pminsw),
+ [0xeb] = MMX_OP2(por),
+ [0xec] = MMX_OP2(paddsb),
+ [0xed] = MMX_OP2(paddsw),
+ [0xee] = MMX_OP2(pmaxsw),
+ [0xef] = MMX_OP2(pxor),
+ [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu (PNI) */
+ [0xf1] = MMX_OP2(psllw),
+ [0xf2] = MMX_OP2(pslld),
+ [0xf3] = MMX_OP2(psllq),
+ [0xf4] = MMX_OP2(pmuludq),
+ [0xf5] = MMX_OP2(pmaddwd),
+ [0xf6] = MMX_OP2(psadbw),
+ [0xf7] = MMX_OP2(maskmov),
+ [0xf8] = MMX_OP2(psubb),
+ [0xf9] = MMX_OP2(psubw),
+ [0xfa] = MMX_OP2(psubl),
+ [0xfb] = MMX_OP2(psubq),
+ [0xfc] = MMX_OP2(paddb),
+ [0xfd] = MMX_OP2(paddw),
+ [0xfe] = MMX_OP2(paddl),
+};
+
+static GenOpFunc2 *sse_op_table2[3 * 8][2] = {
+ [0 + 2] = MMX_OP2(psrlw),
+ [0 + 4] = MMX_OP2(psraw),
+ [0 + 6] = MMX_OP2(psllw),
+ [8 + 2] = MMX_OP2(psrld),
+ [8 + 4] = MMX_OP2(psrad),
+ [8 + 6] = MMX_OP2(pslld),
+ [16 + 2] = MMX_OP2(psrlq),
+ [16 + 3] = { NULL, gen_op_psrldq_xmm },
+ [16 + 6] = MMX_OP2(psllq),
+ [16 + 7] = { NULL, gen_op_pslldq_xmm },
+};
+
+static GenOpFunc1 *sse_op_table3[4 * 3] = {
+ gen_op_cvtsi2ss,
+ gen_op_cvtsi2sd,
+ X86_64_ONLY(gen_op_cvtsq2ss),
+ X86_64_ONLY(gen_op_cvtsq2sd),
+
+ gen_op_cvttss2si,
+ gen_op_cvttsd2si,
+ X86_64_ONLY(gen_op_cvttss2sq),
+ X86_64_ONLY(gen_op_cvttsd2sq),
+
+ gen_op_cvtss2si,
+ gen_op_cvtsd2si,
+ X86_64_ONLY(gen_op_cvtss2sq),
+ X86_64_ONLY(gen_op_cvtsd2sq),
+};
+
+static GenOpFunc2 *sse_op_table4[8][4] = {
+ SSE_FOP(cmpeq),
+ SSE_FOP(cmplt),
+ SSE_FOP(cmple),
+ SSE_FOP(cmpunord),
+ SSE_FOP(cmpneq),
+ SSE_FOP(cmpnlt),
+ SSE_FOP(cmpnle),
+ SSE_FOP(cmpord),
+};
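Note: each row of sse_op_table1 holds the four variants of a 0F opcode selected by the mandatory prefix, in the column order used by SSE_FOP: no prefix (ps / MMX), 0x66 (pd / SSE2 integer), 0xF3 (ss) and 0xF2 (sd). A small illustration of the lookup gen_sse() performs below (sketch, not part of the patch):

/* Illustration only: 0F 58 is ADDPS, 66 0F 58 ADDPD, F3 0F 58 ADDSS,
   F2 0F 58 ADDSD -- the prefix picks the table column. */
static GenOpFunc2 *pick_add_variant(int prefix)
{
    int b1;
    if (prefix & PREFIX_DATA)        /* 0x66 */
        b1 = 1;
    else if (prefix & PREFIX_REPZ)   /* 0xf3 */
        b1 = 2;
    else if (prefix & PREFIX_REPNZ)  /* 0xf2 */
        b1 = 3;
    else
        b1 = 0;
    return sse_op_table1[0x58][b1];
}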
+
+static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
+{
+ int b1, op1_offset, op2_offset, is_xmm, val, ot;
+ int modrm, mod, rm, reg, reg_addr, offset_addr;
+ GenOpFunc2 *sse_op2;
+ GenOpFunc3 *sse_op3;
+
+ b &= 0xff;
+ if (s->prefix & PREFIX_DATA)
+ b1 = 1;
+ else if (s->prefix & PREFIX_REPZ)
+ b1 = 2;
+ else if (s->prefix & PREFIX_REPNZ)
+ b1 = 3;
+ else
+ b1 = 0;
+ sse_op2 = sse_op_table1[b][b1];
+ if (!sse_op2)
+ goto illegal_op;
+ if (b <= 0x5f || b == 0xc6 || b == 0xc2) {
+ is_xmm = 1;
+ } else {
+ if (b1 == 0) {
+ /* MMX case */
+ is_xmm = 0;
+ } else {
+ is_xmm = 1;
+ }
+ }
+ /* simple MMX/SSE operation */
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ return;
+ }
+ if (s->flags & HF_EM_MASK) {
+ illegal_op:
+ gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+ return;
+ }
+ if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
+ goto illegal_op;
+ if (b == 0x77) {
+ /* emms */
+ gen_op_emms();
+ return;
+ }
+ /* prepare MMX state (XXX: optimize by storing fptt and fptags in
+ the static cpu state) */
+ if (!is_xmm) {
+ gen_op_enter_mmx();
+ }
+
+ modrm = ldub_code(s->pc++);
+ reg = ((modrm >> 3) & 7);
+ if (is_xmm)
+ reg |= rex_r;
+ mod = (modrm >> 6) & 3;
+ if (sse_op2 == SSE_SPECIAL) {
+ b |= (b1 << 8);
+ switch(b) {
+ case 0x0e7: /* movntq */
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));
+ break;
+ case 0x1e7: /* movntdq */
+ case 0x02b: /* movntps */
+ case 0x12b: /* movntpd */
+ case 0x2f0: /* lddqu */
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_sto_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+ break;
+ case 0x6e: /* movd mm, ea */
+ gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+ gen_op_movl_mm_T0_mmx(offsetof(CPUX86State,fpregs[reg].mmx));
+ break;
+ case 0x16e: /* movd xmm, ea */
+ gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+ gen_op_movl_mm_T0_xmm(offsetof(CPUX86State,xmm_regs[reg]));
+ break;
+ case 0x6f: /* movq mm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));
+ } else {
+ rm = (modrm & 7);
+ gen_op_movq(offsetof(CPUX86State,fpregs[reg].mmx),
+ offsetof(CPUX86State,fpregs[rm].mmx));
+ }
+ break;
+ case 0x010: /* movups */
+ case 0x110: /* movupd */
+ case 0x028: /* movaps */
+ case 0x128: /* movapd */
+ case 0x16f: /* movdqa xmm, ea */
+ case 0x26f: /* movdqu xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_ldo_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
+ offsetof(CPUX86State,xmm_regs[rm]));
+ }
+ break;
+ case 0x210: /* movss xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+ gen_op_movl_T0_0();
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
+ }
+ break;
+ case 0x310: /* movsd xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ gen_op_movl_T0_0();
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ }
+ break;
+ case 0x012: /* movlps */
+ case 0x112: /* movlpd */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ } else {
+ /* movhlps */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
+ }
+ break;
+ case 0x016: /* movhps */
+ case 0x116: /* movhpd */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+ } else {
+ /* movlhps */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
+ offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ }
+ break;
+ case 0x216: /* movshdup */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_ldo_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
+ offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
+ offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
+ }
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
+ offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ break;
+ case 0x7e: /* movd ea, mm */
+ gen_op_movl_T0_mm_mmx(offsetof(CPUX86State,fpregs[reg].mmx));
+ gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+ break;
+ case 0x17e: /* movd ea, xmm */
+ gen_op_movl_T0_mm_xmm(offsetof(CPUX86State,xmm_regs[reg]));
+ gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+ break;
+ case 0x27e: /* movq xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+ }
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+ break;
+ case 0x7f: /* movq ea, mm */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));
+ } else {
+ rm = (modrm & 7);
+ gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ }
+ break;
+ case 0x011: /* movups */
+ case 0x111: /* movupd */
+ case 0x029: /* movaps */
+ case 0x129: /* movapd */
+ case 0x17f: /* movdqa ea, xmm */
+ case 0x27f: /* movdqu ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_sto_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
+ offsetof(CPUX86State,xmm_regs[reg]));
+ }
+ break;
+ case 0x211: /* movss ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_movl_T0_env(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+ gen_op_st_T0_A0[OT_LONG + s->mem_index]();
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+ }
+ break;
+ case 0x311: /* movsd ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ }
+ break;
+ case 0x013: /* movlps */
+ case 0x113: /* movlpd */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x017: /* movhps */
+ case 0x117: /* movhpd */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x71: /* shift mm, im */
+ case 0x72:
+ case 0x73:
+ case 0x171: /* shift xmm, im */
+ case 0x172:
+ case 0x173:
+ val = ldub_code(s->pc++);
+ if (is_xmm) {
+ gen_op_movl_T0_im(val);
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+ gen_op_movl_T0_0();
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(1)));
+ op1_offset = offsetof(CPUX86State,xmm_t0);
+ } else {
+ gen_op_movl_T0_im(val);
+ gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(0)));
+ gen_op_movl_T0_0();
+ gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(1)));
+ op1_offset = offsetof(CPUX86State,mmx_t0);
+ }
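+ /* the shift count immediate was placed in a scratch register
+ (xmm_t0/mmx_t0) above so that the generic two operand shift helpers
+ can be reused; the r/m field names the register being shifted */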
+ sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + ((modrm >> 3) & 7)][b1];
+ if (!sse_op2)
+ goto illegal_op;
+ if (is_xmm) {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ sse_op2(op2_offset, op1_offset);
+ break;
+ case 0x050: /* movmskps */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movmskps(offsetof(CPUX86State,xmm_regs[rm]));
+ gen_op_mov_reg_T0[OT_LONG][reg]();
+ break;
+ case 0x150: /* movmskpd */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movmskpd(offsetof(CPUX86State,xmm_regs[rm]));
+ gen_op_mov_reg_T0[OT_LONG][reg]();
+ break;
+ case 0x02a: /* cvtpi2ps */
+ case 0x12a: /* cvtpi2pd */
+ gen_op_enter_mmx();
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_ldq_env_A0[s->mem_index >> 2](op2_offset);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ switch(b >> 8) {
+ case 0x0:
+ gen_op_cvtpi2ps(op1_offset, op2_offset);
+ break;
+ default:
+ case 0x1:
+ gen_op_cvtpi2pd(op1_offset, op2_offset);
+ break;
+ }
+ break;
+ case 0x22a: /* cvtsi2ss */
+ case 0x32a: /* cvtsi2sd */
+ ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)](op1_offset);
+ break;
+ case 0x02c: /* cvttps2pi */
+ case 0x12c: /* cvttpd2pi */
+ case 0x02d: /* cvtps2pi */
+ case 0x12d: /* cvtpd2pi */
+ gen_op_enter_mmx();
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ gen_ldo_env_A0[s->mem_index >> 2](op2_offset);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
+ switch(b) {
+ case 0x02c:
+ gen_op_cvttps2pi(op1_offset, op2_offset);
+ break;
+ case 0x12c:
+ gen_op_cvttpd2pi(op1_offset, op2_offset);
+ break;
+ case 0x02d:
+ gen_op_cvtps2pi(op1_offset, op2_offset);
+ break;
+ case 0x12d:
+ gen_op_cvtpd2pi(op1_offset, op2_offset);
+ break;
+ }
+ break;
+ case 0x22c: /* cvttss2si */
+ case 0x32c: /* cvttsd2si */
+ case 0x22d: /* cvtss2si */
+ case 0x32d: /* cvtsd2si */
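+ /* the sse_op_table3 index encodes the operand size (dflag), the
+ ss vs sd form and the truncating vs rounding variant */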
+ ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
+ (b & 1) * 4](op1_offset);
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 0xc4: /* pinsrw */
+ case 0x1c4:
+ gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ val = ldub_code(s->pc++);
+ if (b1) {
+ val &= 7;
+ gen_op_pinsrw_xmm(offsetof(CPUX86State,xmm_regs[reg]), val);
+ } else {
+ val &= 3;
+ gen_op_pinsrw_mmx(offsetof(CPUX86State,fpregs[reg].mmx), val);
+ }
+ break;
+ case 0xc5: /* pextrw */
+ case 0x1c5:
+ if (mod != 3)
+ goto illegal_op;
+ val = ldub_code(s->pc++);
+ if (b1) {
+ val &= 7;
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_pextrw_xmm(offsetof(CPUX86State,xmm_regs[rm]), val);
+ } else {
+ val &= 3;
+ rm = (modrm & 7);
+ gen_op_pextrw_mmx(offsetof(CPUX86State,fpregs[rm].mmx), val);
+ }
+ reg = ((modrm >> 3) & 7) | rex_r;
+ gen_op_mov_reg_T0[OT_LONG][reg]();
+ break;
+ case 0x1d6: /* movq ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
+ }
+ break;
+ case 0x2d6: /* movq2dq */
+ gen_op_enter_mmx();
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
+ offsetof(CPUX86State,fpregs[reg & 7].mmx));
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
+ break;
+ case 0x3d6: /* movdq2q */
+ gen_op_enter_mmx();
+ rm = (modrm & 7);
+ gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
+ offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+ break;
+ case 0xd7: /* pmovmskb */
+ case 0x1d7:
+ if (mod != 3)
+ goto illegal_op;
+ if (b1) {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_pmovmskb_xmm(offsetof(CPUX86State,xmm_regs[rm]));
+ } else {
+ rm = (modrm & 7);
+ gen_op_pmovmskb_mmx(offsetof(CPUX86State,fpregs[rm].mmx));
+ }
+ reg = ((modrm >> 3) & 7) | rex_r;
+ gen_op_mov_reg_T0[OT_LONG][reg]();
+ break;
+ default:
+ goto illegal_op;
+ }
+ } else {
+ /* generic MMX or SSE operation */
+ if (b == 0xf7) {
+ /* maskmovq/maskmovdqu : the store address is implicit
+ (DS:(E)DI/RDI), so we must prepare A0 */
+ if (mod != 3)
+ goto illegal_op;
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
+ gen_op_movq_A0_reg[R_EDI]();
+ } else
+#endif
+ {
+ gen_op_movl_A0_reg[R_EDI]();
+ if (s->aflag == 0)
+ gen_op_andl_A0_ffff();
+ }
+ gen_add_A0_ds_seg(s);
+ }
+ if (is_xmm) {
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
+ b == 0xc2)) {
+ /* scalar ss/sd case: only the low 32/64 bits are read from memory */
+ if (b1 == 2) {
+ /* 32 bit access */
+ gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+ gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+ } else {
+ /* 64 bit access */
+ gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_t0.XMM_D(0)));
+ }
+ } else {
+ gen_ldo_env_A0[s->mem_index >> 2](op2_offset);
+ }
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ } else {
+ op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_ldq_env_A0[s->mem_index >> 2](op2_offset);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ }
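+ /* at this point op1_offset designates the destination register and
+ op2_offset the source register or a temporary loaded from memory */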
+ switch(b) {
+ case 0x70: /* pshufx insn */
+ case 0xc6: /* pshufx insn */
+ val = ldub_code(s->pc++);
+ sse_op3 = (GenOpFunc3 *)sse_op2;
+ sse_op3(op1_offset, op2_offset, val);
+ break;
+ case 0xc2:
+ /* compare insns */
+ val = ldub_code(s->pc++);
+ if (val >= 8)
+ goto illegal_op;
+ sse_op2 = sse_op_table4[val][b1];
+ sse_op2(op1_offset, op2_offset);
+ break;
+ default:
+ sse_op2(op1_offset, op2_offset);
+ break;
+ }
+ if (b == 0x2e || b == 0x2f) {
+ s->cc_op = CC_OP_EFLAGS;
+ }
+ }
+}
+
+
/* convert one instruction. s->is_jmp is set if the translation must
be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
@@ -3176,20 +3896,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
gen_op_movl_A0_im(offset_addr);
}
- /* handle override */
- {
- int override, must_add_seg;
- must_add_seg = s->addseg;
- if (s->override >= 0) {
- override = s->override;
- must_add_seg = 1;
- } else {
- override = R_DS;
- }
- if (must_add_seg) {
- gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
- }
- }
+ gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
gen_op_ld_T0_A0[ot + s->mem_index]();
gen_op_mov_reg_T0[ot][R_EAX]();
@@ -3212,21 +3919,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->aflag == 0)
gen_op_andl_A0_ffff();
}
- /* handle override */
- {
- int override, must_add_seg;
- must_add_seg = s->addseg;
- override = R_DS;
- if (s->override >= 0) {
- override = s->override;
- must_add_seg = 1;
- } else {
- override = R_DS;
- }
- if (must_add_seg) {
- gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
- }
- }
+ gen_add_A0_ds_seg(s);
gen_op_ldu_T0_A0[OT_BYTE + s->mem_index]();
gen_op_mov_reg_T0[OT_BYTE][R_EAX]();
break;
@@ -4827,33 +5520,6 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* nothing to do */
}
break;
- case 0x1ae:
- modrm = ldub_code(s->pc++);
- mod = (modrm >> 6) & 3;
- op = (modrm >> 3) & 7;
- switch(op) {
- case 0: /* fxsave */
- if (mod == 3 || !(s->cpuid_features & CPUID_FXSR))
- goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- gen_op_fxsave_A0((s->dflag == 2));
- break;
- case 1: /* fxrstor */
- if (mod == 3 || !(s->cpuid_features & CPUID_FXSR))
- goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- gen_op_fxrstor_A0((s->dflag == 2));
- break;
- case 5: /* lfence */
- case 6: /* mfence */
- case 7: /* sfence */
- if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
- goto illegal_op;
- break;
- default:
- goto illegal_op;
- }
- break;
case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
if (CODE64(s)) {
@@ -5018,65 +5684,73 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_eob(s);
}
break;
- /* SSE support */
- case 0x16f:
- if (prefixes & PREFIX_DATA) {
- /* movdqa xmm1, xmm2/mem128 */
- if (!(s->cpuid_features & CPUID_SSE))
- goto illegal_op;
- modrm = ldub_code(s->pc++);
- reg = ((modrm >> 3) & 7) | rex_r;
- mod = (modrm >> 6) & 3;
- if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- gen_ldo_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
- } else {
- rm = (modrm & 7) | REX_B(s);
- gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
- offsetof(CPUX86State,xmm_regs[rm]));
- }
- } else {
+ /* MMX/SSE/SSE2/PNI support */
+ case 0x1c3: /* MOVNTI reg, mem */
+ if (!(s->cpuid_features & CPUID_SSE2))
goto illegal_op;
- }
+ ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
+ modrm = ldub_code(s->pc++);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ /* generate a generic store */
+ gen_ldst_modrm(s, modrm, ot, reg, 1);
break;
- case 0x1e7:
- if (prefixes & PREFIX_DATA) {
- /* movntdq mem128, xmm1 */
- if (!(s->cpuid_features & CPUID_SSE))
+ case 0x1ae:
+ modrm = ldub_code(s->pc++);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* fxsave */
+ if (mod == 3 || !(s->cpuid_features & CPUID_FXSR))
goto illegal_op;
- modrm = ldub_code(s->pc++);
- reg = ((modrm >> 3) & 7) | rex_r;
- mod = (modrm >> 6) & 3;
- if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- gen_sto_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
- } else {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_fxsave_A0((s->dflag == 2));
+ break;
+ case 1: /* fxrstor */
+ if (mod == 3 || !(s->cpuid_features & CPUID_FXSR))
goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_fxrstor_A0((s->dflag == 2));
+ break;
+ case 2: /* ldmxcsr */
+ case 3: /* stmxcsr */
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
}
- } else {
- goto illegal_op;
- }
- break;
- case 0x17f:
- if (prefixes & PREFIX_DATA) {
- /* movdqa xmm2/mem128, xmm1 */
- if (!(s->cpuid_features & CPUID_SSE))
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
+ mod == 3)
goto illegal_op;
- modrm = ldub_code(s->pc++);
- reg = ((modrm >> 3) & 7) | rex_r;
- mod = (modrm >> 6) & 3;
- if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
- gen_sto_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ if (op == 2) {
+ gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+ gen_op_movl_env_T0(offsetof(CPUX86State, mxcsr));
} else {
- rm = (modrm & 7) | REX_B(s);
- gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
- offsetof(CPUX86State,xmm_regs[reg]));
+ gen_op_movl_T0_env(offsetof(CPUX86State, mxcsr));
+ gen_op_st_T0_A0[OT_LONG + s->mem_index]();
}
- } else {
+ break;
+ case 5: /* lfence */
+ case 6: /* mfence */
+ case 7: /* sfence */
+ if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
+ goto illegal_op;
+ break;
+ default:
goto illegal_op;
}
break;
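+ /* the MMX/SSE/SSE2/PNI opcode ranges below are all decoded in
+ gen_sse() */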
+ case 0x110 ... 0x117:
+ case 0x128 ... 0x12f:
+ case 0x150 ... 0x177:
+ case 0x17c ... 0x17f:
+ case 0x1c2:
+ case 0x1c4 ... 0x1c6:
+ case 0x1d0 ... 0x1fe:
+ gen_sse(s, b, pc_start, rex_r);
+ break;
default:
goto illegal_op;
}
@@ -5250,6 +5924,12 @@ static uint16_t opc_write_flags[NB_OPS] = {
[INDEX_op_imull_T0_T1] = CC_OSZAPC,
X86_64_DEF([INDEX_op_imulq_T0_T1] = CC_OSZAPC,)
+ /* sse */
+ [INDEX_op_ucomiss] = CC_OSZAPC,
+ [INDEX_op_ucomisd] = CC_OSZAPC,
+ [INDEX_op_comiss] = CC_OSZAPC,
+ [INDEX_op_comisd] = CC_OSZAPC,
+
/* bcd */
[INDEX_op_aam] = CC_OSZAPC,
[INDEX_op_aad] = CC_OSZAPC,
diff --git a/vl.c b/vl.c
index 7a4762489a..fe873bac84 100644
--- a/vl.c
+++ b/vl.c
@@ -2082,15 +2082,14 @@ static void cpu_get_seg(QEMUFile *f, SegmentCache *dt)
void cpu_save(QEMUFile *f, void *opaque)
{
CPUState *env = opaque;
- uint16_t fptag, fpus, fpuc;
+ uint16_t fptag, fpus, fpuc, fpregs_format;
uint32_t hflags;
int i;
-
+
for(i = 0; i < CPU_NB_REGS; i++)
qemu_put_betls(f, &env->regs[i]);
qemu_put_betls(f, &env->eip);
qemu_put_betls(f, &env->eflags);
- qemu_put_betl(f, 0); /* XXX: suppress that */
hflags = env->hflags; /* XXX: suppress most of the redundant hflags */
qemu_put_be32s(f, &hflags);
@@ -2098,23 +2097,37 @@ void cpu_save(QEMUFile *f, void *opaque)
fpuc = env->fpuc;
fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
fptag = 0;
- for (i=7; i>=0; i--) {
- fptag <<= 2;
- if (env->fptags[i]) {
- fptag |= 3;
- }
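+ /* fptag is saved in abridged form (as in FXSAVE): one bit per
+ register, set when the register is valid */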
+ for(i = 0; i < 8; i++) {
+ fptag |= ((!env->fptags[i]) << i);
}
qemu_put_be16s(f, &fpuc);
qemu_put_be16s(f, &fpus);
qemu_put_be16s(f, &fptag);
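+ /* fpregs_format tells how the FP registers are stored below:
+ 0 = 80 bit extended precision, 1 = raw 64 bit doubles */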
+#ifdef USE_X86LDOUBLE
+ fpregs_format = 0;
+#else
+ fpregs_format = 1;
+#endif
+ qemu_put_be16s(f, &fpregs_format);
+
for(i = 0; i < 8; i++) {
uint64_t mant;
uint16_t exp;
- cpu_get_fp80(&mant, &exp, env->fpregs[i]);
+#ifdef USE_X86LDOUBLE
+ /* we save the real CPU data (in case of MMX usage, only 'mant'
+ contains the MMX register) */
+ cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
qemu_put_be64(f, mant);
qemu_put_be16(f, exp);
+#else
+ /* if we use doubles for float emulation, we save the doubles to
+ avoid losing information in case of MMX usage. This can cause
+ problems if the image is restored on a host built with long
+ doubles instead. */
+ qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
+#endif
}
for(i = 0; i < 6; i++)
@@ -2139,12 +2152,14 @@ void cpu_save(QEMUFile *f, void *opaque)
/* MMU */
qemu_put_be32s(f, &env->a20_mask);
-#ifdef TARGET_X86_64
+ /* XMM */
+ qemu_put_be32s(f, &env->mxcsr);
for(i = 0; i < CPU_NB_REGS; i++) {
qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(0));
qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(1));
}
+#ifdef TARGET_X86_64
qemu_put_be64s(f, &env->efer);
qemu_put_be64s(f, &env->star);
qemu_put_be64s(f, &env->lstar);
@@ -2154,40 +2169,97 @@ void cpu_save(QEMUFile *f, void *opaque)
#endif
}
+/* XXX: move this into a generic FPU layer */
+struct x86_longdouble {
+ uint64_t mant;
+ uint16_t exp;
+};
+
+#define MANTD1(fp) (fp & ((1LL << 52) - 1))
+#define EXPBIAS1 1023
+#define EXPD1(fp) ((fp >> 52) & 0x7FF)
+#define SIGND1(fp) ((fp >> 32) & 0x80000000)
+
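+/* expand an IEEE754 double (temp) into the x86 80 bit extended format:
+ the integer bit is made explicit and the exponent is re-biased from
+ 1023 to 16383 */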
+static void fp64_to_fp80(struct x86_longdouble *p, uint64_t temp)
+{
+ int e;
+ /* mantissa */
+ p->mant = (MANTD1(temp) << 11) | (1LL << 63);
+ /* exponent + sign */
+ e = EXPD1(temp) - EXPBIAS1 + 16383;
+ e |= SIGND1(temp) >> 16;
+ p->exp = e;
+}
+
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
CPUState *env = opaque;
- int i;
+ int i, guess_mmx;
uint32_t hflags;
- uint16_t fpus, fpuc, fptag;
+ uint16_t fpus, fpuc, fptag, fpregs_format;
- if (version_id != 2)
+ if (version_id != 3)
return -EINVAL;
for(i = 0; i < CPU_NB_REGS; i++)
qemu_get_betls(f, &env->regs[i]);
qemu_get_betls(f, &env->eip);
qemu_get_betls(f, &env->eflags);
- qemu_get_betl(f); /* XXX: suppress that */
qemu_get_be32s(f, &hflags);
qemu_get_be16s(f, &fpuc);
qemu_get_be16s(f, &fpus);
qemu_get_be16s(f, &fptag);
-
+ qemu_get_be16s(f, &fpregs_format);
+
+ /* NOTE: we cannot always restore the FPU state if the image comes
+ from a host with a different 'USE_X86LDOUBLE' define. We guess
+ whether the registers hold MMX state so they can be restored
+ correctly in that case. */
+ guess_mmx = ((fptag == 0xff) && (fpus & 0x3800) == 0);
for(i = 0; i < 8; i++) {
uint64_t mant;
uint16_t exp;
- mant = qemu_get_be64(f);
- exp = qemu_get_be16(f);
- env->fpregs[i] = cpu_set_fp80(mant, exp);
+ struct x86_longdouble *p;
+
+ switch(fpregs_format) {
+ case 0:
+ mant = qemu_get_be64(f);
+ exp = qemu_get_be16(f);
+#ifdef USE_X86LDOUBLE
+ env->fpregs[i].d = cpu_set_fp80(mant, exp);
+#else
+ /* difficult case: the image holds 80 bit values but this
+ build uses 64 bit doubles */
+ if (guess_mmx)
+ env->fpregs[i].mmx.MMX_Q(0) = mant;
+ else
+ env->fpregs[i].d = cpu_set_fp80(mant, exp);
+#endif
+ break;
+ case 1:
+ mant = qemu_get_be64(f);
+#ifdef USE_X86LDOUBLE
+ /* difficult case: the image holds 64 bit doubles but this
+ build uses 80 bit long doubles */
+ p = (void *)&env->fpregs[i];
+ if (guess_mmx) {
+ p->mant = mant;
+ p->exp = 0xffff;
+ } else {
+ fp64_to_fp80(p, mant);
+ }
+#else
+ env->fpregs[i].mmx.MMX_Q(0) = mant;
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
}
env->fpuc = fpuc;
env->fpstt = (fpus >> 11) & 7;
env->fpus = fpus & ~0x3800;
+ fptag ^= 0xff;
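+ /* the tag was saved with one bit per register (set = valid); after
+ the xor a set bit means the register is empty, matching fptags[] */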
for(i = 0; i < 8; i++) {
- env->fptags[i] = ((fptag & 3) == 3);
- fptag >>= 2;
+ env->fptags[i] = (fptag >> i) & 1;
}
for(i = 0; i < 6; i++)
@@ -2212,12 +2284,13 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
/* MMU */
qemu_get_be32s(f, &env->a20_mask);
-#ifdef TARGET_X86_64
+ qemu_get_be32s(f, &env->mxcsr);
for(i = 0; i < CPU_NB_REGS; i++) {
qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(0));
qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(1));
}
+#ifdef TARGET_X86_64
qemu_get_be64s(f, &env->efer);
qemu_get_be64s(f, &env->star);
qemu_get_be64s(f, &env->lstar);
@@ -3433,7 +3506,7 @@ int main(int argc, char **argv)
cpu_single_env = env;
register_savevm("timer", 0, 1, timer_save, timer_load, env);
- register_savevm("cpu", 0, 2, cpu_save, cpu_load, env);
+ register_savevm("cpu", 0, 3, cpu_save, cpu_load, env);
register_savevm("ram", 0, 1, ram_save, ram_load, NULL);
qemu_register_reset(main_cpu_reset, global_env);