author     bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2006-09-24 18:41:56 +0000
committer  bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2006-09-24 18:41:56 +0000
commit     3b21e03e043236a0569383ff5d677c336e3d8b3a
tree       a7912035f32e2e8c5a13d8743a4a0880bcc2fc15
parent     ba86345802fcede0b0f50393c97c128aa7a3f40c
added SMM (System Management Mode) support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2169 c046a42c-6fe2-441c-8c8c-71466251a162
-rw-r--r--  cpu-all.h                 2
-rw-r--r--  cpu-exec.c               12
-rw-r--r--  target-i386/cpu.h         4
-rw-r--r--  target-i386/exec.h        2
-rw-r--r--  target-i386/helper.c    263
-rw-r--r--  target-i386/helper2.c     9
-rw-r--r--  target-i386/op.c          5
-rw-r--r--  target-i386/translate.c  11
-rw-r--r--  vl.c                      5
9 files changed, 307 insertions, 6 deletions
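
The hook for board code is the new CPU_INTERRUPT_SMI flag: a device model raises it with cpu_interrupt(), and cpu_exec() calls do_smm_enter() on the next iteration of its interrupt check (see the cpu-exec.c hunk below). A minimal sketch of the device side follows; only cpu_interrupt() and CPU_INTERRUPT_SMI are part of this patch, while the APM-style trigger register and the first_cpu target are assumptions for illustration.

    /* Hypothetical device-model write callback that requests an SMI.
       cpu_interrupt() and CPU_INTERRUPT_SMI come from cpu-all.h; the
       trigger (an APM-style control write) is only an illustration. */
    static void smi_trigger_write(void *opaque, uint32_t addr, uint32_t val)
    {
        CPUState *env = first_cpu;   /* assumed single-CPU target */

        cpu_interrupt(env, CPU_INTERRUPT_SMI);
    }
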
diff --git a/cpu-all.h b/cpu-all.h
index 996289eafa..a8628b5020 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -762,6 +762,7 @@ extern int code_copy_enabled;
#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
+#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
@@ -839,6 +840,7 @@ typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
void cpu_register_physical_memory(target_phys_addr_t start_addr,
unsigned long size,
unsigned long phys_offset);
+uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
int cpu_register_io_memory(int io_index,
CPUReadMemoryFunc **mem_read,
CPUWriteMemoryFunc **mem_write,
diff --git a/cpu-exec.c b/cpu-exec.c
index 6385639f46..0b5f7f3d86 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -458,8 +458,16 @@ int cpu_exec(CPUState *env1)
interrupt_request = env->interrupt_request;
if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
- /* if hardware interrupt pending, we execute it */
- if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ env->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter();
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+ tmp_T0 = 0;
+#else
+ T0 = 0;
+#endif
+ } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
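
The hunk above checks SMI ahead of ordinary external interrupts, which matches the hardware priority: SMI is not gated by EFLAGS.IF or the interrupt shadow, only by the CPU already being in SMM. A conceptual restatement of the two gate conditions, purely for illustration (the patch itself open-codes them inside cpu_exec()):

    /* Illustration only: when each pending request may be taken. */
    static int smi_deliverable(CPUX86State *env, int req)
    {
        return (req & CPU_INTERRUPT_SMI) && !(env->hflags & HF_SMM_MASK);
    }

    static int hard_irq_deliverable(CPUX86State *env, int req)
    {
        return (req & CPU_INTERRUPT_HARD) &&
               (env->eflags & IF_MASK) &&
               !(env->hflags & HF_INHIBIT_IRQ_MASK);
    }
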
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 2f23617303..55e7a98c54 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -142,6 +142,7 @@
#define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_HALTED_SHIFT 18 /* CPU halted */
+#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
@@ -158,6 +159,7 @@
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_HALTED_MASK (1 << HF_HALTED_SHIFT)
+#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define CR0_PE_MASK (1 << 0)
#define CR0_MP_MASK (1 << 1)
@@ -503,6 +505,7 @@ typedef struct CPUX86State {
int exception_is_int;
target_ulong exception_next_eip;
target_ulong dr[8]; /* debug registers */
+ uint32_t smbase;
int interrupt_request;
int user_mode_only; /* user mode only simulation */
@@ -630,6 +633,7 @@ void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif
+void cpu_smm_update(CPUX86State *env);
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
diff --git a/target-i386/exec.h b/target-i386/exec.h
index 609a5869a0..377f7bd28b 100644
--- a/target-i386/exec.h
+++ b/target-i386/exec.h
@@ -176,6 +176,7 @@ void raise_interrupt(int intno, int is_int, int error_code,
int next_eip_addend);
void raise_exception_err(int exception_index, int error_code);
void raise_exception(int exception_index);
+void do_smm_enter(void);
void __hidden cpu_loop_exit(void);
void OPPROTO op_movl_eflags_T0(void);
@@ -203,6 +204,7 @@ void helper_lsl(void);
void helper_lar(void);
void helper_verr(void);
void helper_verw(void);
+void helper_rsm(void);
void check_iob_T0(void);
void check_iow_T0(void);
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 70e9fae3b0..d990c07ed1 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1215,6 +1215,269 @@ void raise_exception(int exception_index)
raise_interrupt(exception_index, 0, 0, 0);
}
+/* SMM support */
+
+#ifdef TARGET_X86_64
+#define SMM_REVISION_ID 0x00020064
+#else
+#define SMM_REVISION_ID 0x00020000
+#endif
+
+void do_smm_enter(void)
+{
+ target_ulong sm_state;
+ SegmentCache *dt;
+ int i, offset;
+
+ if (loglevel & CPU_LOG_INT) {
+ fprintf(logfile, "SMM: enter\n");
+ cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+ }
+
+ env->hflags |= HF_SMM_MASK;
+ cpu_smm_update(env);
+
+ sm_state = env->smbase + 0x8000;
+
+#ifdef TARGET_X86_64
+ for(i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ offset = 0x7e00 + i * 16;
+ stw_phys(sm_state + offset, dt->selector);
+ stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+ stl_phys(sm_state + offset + 4, dt->limit);
+ stq_phys(sm_state + offset + 8, dt->base);
+ }
+
+ stq_phys(sm_state + 0x7e68, env->gdt.base);
+ stl_phys(sm_state + 0x7e64, env->gdt.limit);
+
+ stw_phys(sm_state + 0x7e70, env->ldt.selector);
+ stq_phys(sm_state + 0x7e78, env->ldt.base);
+ stl_phys(sm_state + 0x7e74, env->ldt.limit);
+ stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+
+ stq_phys(sm_state + 0x7e88, env->idt.base);
+ stl_phys(sm_state + 0x7e84, env->idt.limit);
+
+ stw_phys(sm_state + 0x7e90, env->tr.selector);
+ stq_phys(sm_state + 0x7e98, env->tr.base);
+ stl_phys(sm_state + 0x7e94, env->tr.limit);
+ stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+
+ stq_phys(sm_state + 0x7ed0, env->efer);
+
+ stq_phys(sm_state + 0x7ff8, EAX);
+ stq_phys(sm_state + 0x7ff0, ECX);
+ stq_phys(sm_state + 0x7fe8, EDX);
+ stq_phys(sm_state + 0x7fe0, EBX);
+ stq_phys(sm_state + 0x7fd8, ESP);
+ stq_phys(sm_state + 0x7fd0, EBP);
+ stq_phys(sm_state + 0x7fc8, ESI);
+ stq_phys(sm_state + 0x7fc0, EDI);
+ for(i = 8; i < 16; i++)
+ stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
+ stq_phys(sm_state + 0x7f78, env->eip);
+ stl_phys(sm_state + 0x7f70, compute_eflags());
+ stl_phys(sm_state + 0x7f68, env->dr[6]);
+ stl_phys(sm_state + 0x7f60, env->dr[7]);
+
+ stl_phys(sm_state + 0x7f48, env->cr[4]);
+ stl_phys(sm_state + 0x7f50, env->cr[3]);
+ stl_phys(sm_state + 0x7f58, env->cr[0]);
+
+ stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
+ stl_phys(sm_state + 0x7f00, env->smbase);
+#else
+ stl_phys(sm_state + 0x7ffc, env->cr[0]);
+ stl_phys(sm_state + 0x7ff8, env->cr[3]);
+ stl_phys(sm_state + 0x7ff4, compute_eflags());
+ stl_phys(sm_state + 0x7ff0, env->eip);
+ stl_phys(sm_state + 0x7fec, EDI);
+ stl_phys(sm_state + 0x7fe8, ESI);
+ stl_phys(sm_state + 0x7fe4, EBP);
+ stl_phys(sm_state + 0x7fe0, ESP);
+ stl_phys(sm_state + 0x7fdc, EBX);
+ stl_phys(sm_state + 0x7fd8, EDX);
+ stl_phys(sm_state + 0x7fd4, ECX);
+ stl_phys(sm_state + 0x7fd0, EAX);
+ stl_phys(sm_state + 0x7fcc, env->dr[6]);
+ stl_phys(sm_state + 0x7fc8, env->dr[7]);
+
+ stl_phys(sm_state + 0x7fc4, env->tr.selector);
+ stl_phys(sm_state + 0x7f64, env->tr.base);
+ stl_phys(sm_state + 0x7f60, env->tr.limit);
+ stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+ stl_phys(sm_state + 0x7fc0, env->ldt.selector);
+ stl_phys(sm_state + 0x7f80, env->ldt.base);
+ stl_phys(sm_state + 0x7f7c, env->ldt.limit);
+ stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+ stl_phys(sm_state + 0x7f74, env->gdt.base);
+ stl_phys(sm_state + 0x7f70, env->gdt.limit);
+
+ stl_phys(sm_state + 0x7f58, env->idt.base);
+ stl_phys(sm_state + 0x7f54, env->idt.limit);
+
+ for(i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ if (i < 3)
+ offset = 0x7f84 + i * 12;
+ else
+ offset = 0x7f2c + (i - 3) * 12;
+ stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
+ stl_phys(sm_state + offset + 8, dt->base);
+ stl_phys(sm_state + offset + 4, dt->limit);
+ stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+ }
+ stl_phys(sm_state + 0x7f14, env->cr[4]);
+
+ stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
+ stl_phys(sm_state + 0x7ef8, env->smbase);
+#endif
+ /* init SMM cpu state */
+
+ load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->eip = 0x00008000;
+ cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
+ 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
+
+ cpu_x86_update_cr0(env,
+ env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
+ cpu_x86_update_cr4(env, 0);
+ env->dr[7] = 0x00000400;
+#ifdef TARGET_X86_64
+ env->efer = 0;
+#endif
+ CC_OP = CC_OP_EFLAGS;
+}
+
+void helper_rsm(void)
+{
+ target_ulong sm_state;
+ int i, offset;
+ uint32_t val;
+
+ sm_state = env->smbase + 0x8000;
+#ifdef TARGET_X86_64
+ for(i = 0; i < 6; i++) {
+ offset = 0x7e00 + i * 16;
+ cpu_x86_load_seg_cache(env, i,
+ lduw_phys(sm_state + offset),
+ ldq_phys(sm_state + offset + 8),
+ ldl_phys(sm_state + offset + 4),
+ (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
+ }
+
+ env->gdt.base = ldq_phys(sm_state + 0x7e68);
+ env->gdt.limit = ldl_phys(sm_state + 0x7e64);
+
+ env->ldt.selector = lduw_phys(sm_state + 0x7e70);
+ env->ldt.base = ldq_phys(sm_state + 0x7e78);
+ env->ldt.limit = ldl_phys(sm_state + 0x7e74);
+ env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
+
+ env->idt.base = ldq_phys(sm_state + 0x7e88);
+ env->idt.limit = ldl_phys(sm_state + 0x7e84);
+
+ env->tr.selector = lduw_phys(sm_state + 0x7e90);
+ env->tr.base = ldq_phys(sm_state + 0x7e98);
+ env->tr.limit = ldl_phys(sm_state + 0x7e94);
+ env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
+
+ env->efer = ldq_phys(sm_state + 0x7ed0);
+
+ EAX = ldq_phys(sm_state + 0x7ff8);
+ ECX = ldq_phys(sm_state + 0x7ff0);
+ EDX = ldq_phys(sm_state + 0x7fe8);
+ EBX = ldq_phys(sm_state + 0x7fe0);
+ ESP = ldq_phys(sm_state + 0x7fd8);
+ EBP = ldq_phys(sm_state + 0x7fd0);
+ ESI = ldq_phys(sm_state + 0x7fc8);
+ EDI = ldq_phys(sm_state + 0x7fc0);
+ for(i = 8; i < 16; i++)
+ env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
+ env->eip = ldq_phys(sm_state + 0x7f78);
+ load_eflags(ldl_phys(sm_state + 0x7f70),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->dr[6] = ldl_phys(sm_state + 0x7f68);
+ env->dr[7] = ldl_phys(sm_state + 0x7f60);
+
+ cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
+ cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
+ cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
+
+ val = ldl_phys(sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
+ }
+#else
+ cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
+ cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
+ load_eflags(ldl_phys(sm_state + 0x7ff4),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->eip = ldl_phys(sm_state + 0x7ff0);
+ EDI = ldl_phys(sm_state + 0x7fec);
+ ESI = ldl_phys(sm_state + 0x7fe8);
+ EBP = ldl_phys(sm_state + 0x7fe4);
+ ESP = ldl_phys(sm_state + 0x7fe0);
+ EBX = ldl_phys(sm_state + 0x7fdc);
+ EDX = ldl_phys(sm_state + 0x7fd8);
+ ECX = ldl_phys(sm_state + 0x7fd4);
+ EAX = ldl_phys(sm_state + 0x7fd0);
+ env->dr[6] = ldl_phys(sm_state + 0x7fcc);
+ env->dr[7] = ldl_phys(sm_state + 0x7fc8);
+
+ env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
+ env->tr.base = ldl_phys(sm_state + 0x7f64);
+ env->tr.limit = ldl_phys(sm_state + 0x7f60);
+ env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+ env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
+ env->ldt.base = ldl_phys(sm_state + 0x7f80);
+ env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
+ env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
+
+ env->gdt.base = ldl_phys(sm_state + 0x7f74);
+ env->gdt.limit = ldl_phys(sm_state + 0x7f70);
+
+ env->idt.base = ldl_phys(sm_state + 0x7f58);
+ env->idt.limit = ldl_phys(sm_state + 0x7f54);
+
+ for(i = 0; i < 6; i++) {
+ if (i < 3)
+ offset = 0x7f84 + i * 12;
+ else
+ offset = 0x7f2c + (i - 3) * 12;
+ cpu_x86_load_seg_cache(env, i,
+ ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
+ ldl_phys(sm_state + offset + 8),
+ ldl_phys(sm_state + offset + 4),
+ (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
+ }
+ cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
+
+ val = ldl_phys(sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
+ }
+#endif
+ CC_OP = CC_OP_EFLAGS;
+ env->hflags &= ~HF_SMM_MASK;
+ cpu_smm_update(env);
+
+ if (loglevel & CPU_LOG_INT) {
+ fprintf(logfile, "SMM: after RSM\n");
+ cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+ }
+}
+
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
call it from another function */
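
Note that cpu_smm_update(), declared in the cpu.h hunk above and called from do_smm_enter() and helper_rsm(), is not defined by this patch: it is a hook left to the machine model, which typically has to open or close the SMRAM window while the CPU is in SMM. A rough sketch of what a board-side implementation might look like; chipset_set_smm() is a placeholder name, not an existing QEMU function.

    /* Hypothetical board-side definition of the hook declared in cpu.h.
       Real code would reprogram the emulated memory controller's SMRAM
       mapping; chipset_set_smm() stands in for that. */
    void cpu_smm_update(CPUX86State *env)
    {
        int in_smm = (env->hflags & HF_SMM_MASK) != 0;

        chipset_set_smm(in_smm);   /* placeholder */
    }
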
diff --git a/target-i386/helper2.c b/target-i386/helper2.c
index 19af159f92..f05ae4a691 100644
--- a/target-i386/helper2.c
+++ b/target-i386/helper2.c
@@ -161,7 +161,8 @@ void cpu_reset(CPUX86State *env)
cpu_x86_update_cr0(env, 0x60000010);
env->a20_mask = 0xffffffff;
-
+ env->smbase = 0x30000;
+
env->idt.limit = 0xffff;
env->gdt.limit = 0xffff;
env->ldt.limit = 0xffff;
@@ -268,7 +269,7 @@ void cpu_dump_state(CPUState *env, FILE *f,
"RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
"R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
"R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
- "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
+ "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
env->regs[R_EAX],
env->regs[R_EBX],
env->regs[R_ECX],
@@ -296,13 +297,14 @@ void cpu_dump_state(CPUState *env, FILE *f,
env->hflags & HF_CPL_MASK,
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
(env->a20_mask >> 20) & 1,
+ (env->hflags >> HF_SMM_SHIFT) & 1,
(env->hflags >> HF_HALTED_SHIFT) & 1);
} else
#endif
{
cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
"ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
- "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
+ "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
(uint32_t)env->regs[R_EAX],
(uint32_t)env->regs[R_EBX],
(uint32_t)env->regs[R_ECX],
@@ -322,6 +324,7 @@ void cpu_dump_state(CPUState *env, FILE *f,
env->hflags & HF_CPL_MASK,
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
(env->a20_mask >> 20) & 1,
+ (env->hflags >> HF_SMM_SHIFT) & 1,
(env->hflags >> HF_HALTED_SHIFT) & 1);
}
diff --git a/target-i386/op.c b/target-i386/op.c
index 7a3aa77273..7c20f524fe 100644
--- a/target-i386/op.c
+++ b/target-i386/op.c
@@ -678,6 +678,11 @@ void OPPROTO op_reset_inhibit_irq(void)
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
+void OPPROTO op_rsm(void)
+{
+ helper_rsm();
+}
+
#if 0
/* vm86plus instructions */
void OPPROTO op_cli_vm(void)
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 5c6453d4fd..ad18af9757 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -6012,6 +6012,17 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
/* ignore for now */
break;
+ case 0x1aa: /* rsm */
+ if (!(s->flags & HF_SMM_MASK))
+ goto illegal_op;
+ if (s->cc_op != CC_OP_DYNAMIC) {
+ gen_op_set_cc_op(s->cc_op);
+ s->cc_op = CC_OP_DYNAMIC;
+ }
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_op_rsm();
+ gen_eob(s);
+ break;
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x150 ... 0x177:
diff --git a/vl.c b/vl.c
index d3efbbd4b7..c30bb6b4d0 100644
--- a/vl.c
+++ b/vl.c
@@ -4881,6 +4881,7 @@ void cpu_save(QEMUFile *f, void *opaque)
qemu_put_be64s(f, &env->fmask);
qemu_put_be64s(f, &env->kernelgsbase);
#endif
+ qemu_put_be32s(f, &env->smbase);
}
#ifdef USE_X86LDOUBLE
@@ -4914,7 +4915,7 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
uint32_t hflags;
uint16_t fpus, fpuc, fptag, fpregs_format;
- if (version_id != 3)
+ if (version_id != 3 && version_id != 4)
return -EINVAL;
for(i = 0; i < CPU_NB_REGS; i++)
qemu_get_betls(f, &env->regs[i]);
@@ -5017,6 +5018,8 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_be64s(f, &env->fmask);
qemu_get_be64s(f, &env->kernelgsbase);
#endif
+ if (version_id >= 4)
+ qemu_get_be32s(f, &env->smbase);
/* XXX: compute hflags from scratch, except for CPL and IIF */
env->hflags = hflags;