Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/Makefile.objs |   2
-rw-r--r--  target-i386/cpu-qom.h     |   1
-rw-r--r--  target-i386/cpu.c         |   1
-rw-r--r--  target-i386/cpu.h         |   2
-rw-r--r--  target-i386/kvm.c         |  34
-rw-r--r--  target-i386/machine.c     |  27
-rw-r--r--  target-i386/monitor.c     | 494
7 files changed, 558 insertions, 3 deletions
diff --git a/target-i386/Makefile.objs b/target-i386/Makefile.objs
index 7a1df2c983..3da413e8bd 100644
--- a/target-i386/Makefile.objs
+++ b/target-i386/Makefile.objs
@@ -2,6 +2,6 @@ obj-y += translate.o helper.o cpu.o
 obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
 obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o
 obj-y += gdbstub.o
-obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o
+obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index 7a4fddd85f..c35b624c9d 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -89,6 +89,7 @@ typedef struct X86CPU {
     bool hyperv_relaxed_timing;
     int hyperv_spinlock_attempts;
     bool hyperv_time;
+    bool hyperv_crash;
     bool check_cpuid;
     bool enforce_cpuid;
     bool expose_kvm;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index cfb8aa7e4e..7c52714014 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -3121,6 +3121,7 @@ static Property x86_cpu_properties[] = {
     DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
     DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
     DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
+    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
     DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 3bcf2f6f73..5231e8c300 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -21,6 +21,7 @@
 
 #include "config.h"
 #include "qemu-common.h"
+#include "standard-headers/asm-x86/hyperv.h"
 
 #ifdef TARGET_X86_64
 #define TARGET_LONG_BITS 64
@@ -908,6 +909,7 @@ typedef struct CPUX86State {
     uint64_t msr_hv_guest_os_id;
     uint64_t msr_hv_vapic;
     uint64_t msr_hv_tsc;
+    uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
 
     /* exception/interrupt handling */
     int error_code;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 066d03d99e..7b0ba179cc 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -33,7 +33,7 @@
 #include "hw/i386/apic_internal.h"
 #include "hw/i386/apic-msidef.h"
 #include "exec/ioport.h"
-#include <asm/hyperv.h>
+#include "standard-headers/asm-x86/hyperv.h"
 #include "hw/pci/pci.h"
 #include "migration/migration.h"
 #include "exec/memattrs.h"
@@ -80,6 +80,7 @@ static int lm_capable_kernel;
 static bool has_msr_hv_hypercall;
 static bool has_msr_hv_vapic;
 static bool has_msr_hv_tsc;
+static bool has_msr_hv_crash;
 static bool has_msr_mtrr;
 static bool has_msr_xss;
 
@@ -457,7 +458,8 @@ static bool hyperv_enabled(X86CPU *cpu)
     return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
            (hyperv_hypercall_available(cpu) ||
             cpu->hyperv_time ||
-            cpu->hyperv_relaxed_timing);
+            cpu->hyperv_relaxed_timing ||
+            cpu->hyperv_crash);
 }
 
 static Error *invtsc_mig_blocker;
@@ -523,6 +525,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
             c->eax |= 0x200;
             has_msr_hv_tsc = true;
         }
+        if (cpu->hyperv_crash && has_msr_hv_crash) {
+            c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
+        }
+
         c = &cpuid_data.entries[cpuid_i++];
         c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
         if (cpu->hyperv_relaxed_timing) {
@@ -843,6 +849,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                     has_msr_xss = true;
                     continue;
                 }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
+                    has_msr_hv_crash = true;
+                    continue;
+                }
             }
         }
 
@@ -1375,6 +1385,16 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
                               env->msr_hv_tsc);
         }
+        if (has_msr_hv_crash) {
+            int j;
+
+            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
+                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
+                                  env->msr_hv_crash_params[j]);
+
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
+                              HV_X64_MSR_CRASH_CTL_NOTIFY);
+        }
         if (has_msr_mtrr) {
             kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
             kvm_msr_entry_set(&msrs[n++],
@@ -1730,6 +1750,13 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hv_tsc) {
         msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
     }
+    if (has_msr_hv_crash) {
+        int j;
+
+        for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+            msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
+        }
+    }
     if (has_msr_mtrr) {
         msrs[n++].index = MSR_MTRRdefType;
         msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -1877,6 +1904,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_REFERENCE_TSC:
             env->msr_hv_tsc = msrs[i].data;
             break;
+        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
+            break;
         case MSR_MTRRdefType:
             env->mtrr_deftype = msrs[i].data;
             break;
diff --git a/target-i386/machine.c b/target-i386/machine.c
index a0df64b577..9fa056341a 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -661,6 +661,32 @@ static const VMStateDescription vmstate_msr_hyperv_time = {
     }
 };
 
+static bool hyperv_crash_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
+        if (env->msr_hv_crash_params[i]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_crash = {
+    .name = "cpu/msr_hyperv_crash",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_crash_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
+                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool avx512_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
@@ -842,6 +868,7 @@ VMStateDescription vmstate_x86_cpu = {
         &vmstate_msr_hypercall_hypercall,
         &vmstate_msr_hyperv_vapic,
         &vmstate_msr_hyperv_time,
+        &vmstate_msr_hyperv_crash,
         &vmstate_avx512,
         &vmstate_xss,
         NULL
diff --git a/target-i386/monitor.c b/target-i386/monitor.c
new file mode 100644
index 0000000000..6ac86360e9
--- /dev/null
+++ b/target-i386/monitor.c
@@ -0,0 +1,494 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+
+static void print_pte(Monitor *mon, hwaddr addr,
+                      hwaddr pte,
+                      hwaddr mask)
+{
+#ifdef TARGET_X86_64
+    if (addr & (1ULL << 47)) {
+        addr |= -1LL << 48;
+    }
+#endif
+    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
+                   " %c%c%c%c%c%c%c%c%c\n",
+                   addr,
+                   pte & mask,
+                   pte & PG_NX_MASK ? 'X' : '-',
+                   pte & PG_GLOBAL_MASK ? 'G' : '-',
+                   pte & PG_PSE_MASK ? 'P' : '-',
+                   pte & PG_DIRTY_MASK ? 'D' : '-',
+                   pte & PG_ACCESSED_MASK ? 'A' : '-',
+                   pte & PG_PCD_MASK ? 'C' : '-',
+                   pte & PG_PWT_MASK ? 'T' : '-',
+                   pte & PG_USER_MASK ? 'U' : '-',
+                   pte & PG_RW_MASK ? 'W' : '-');
+}
+
+static void tlb_info_32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2;
+    uint32_t pgd, pde, pte;
+
+    pgd = env->cr[3] & ~0xfff;
+    for(l1 = 0; l1 < 1024; l1++) {
+        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+        pde = le32_to_cpu(pde);
+        if (pde & PG_PRESENT_MASK) {
+            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+                /* 4M pages */
+                print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
+            } else {
+                for(l2 = 0; l2 < 1024; l2++) {
+                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+                    pte = le32_to_cpu(pte);
+                    if (pte & PG_PRESENT_MASK) {
+                        print_pte(mon, (l1 << 22) + (l2 << 12),
+                                  pte & ~PG_PSE_MASK,
+                                  ~0xfff);
+                    }
+                }
+            }
+        }
+    }
+}
+
+static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2, l3;
+    uint64_t pdpe, pde, pte;
+    uint64_t pdp_addr, pd_addr, pt_addr;
+
+    pdp_addr = env->cr[3] & ~0x1f;
+    for (l1 = 0; l1 < 4; l1++) {
+        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+        pdpe = le64_to_cpu(pdpe);
+        if (pdpe & PG_PRESENT_MASK) {
+            pd_addr = pdpe & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+                pde = le64_to_cpu(pde);
+                if (pde & PG_PRESENT_MASK) {
+                    if (pde & PG_PSE_MASK) {
+                        /* 2M pages with PAE, CR4.PSE is ignored */
+                        print_pte(mon, (l1 << 30 ) + (l2 << 21), pde,
+                                  ~((hwaddr)(1 << 20) - 1));
+                    } else {
+                        pt_addr = pde & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+                            pte = le64_to_cpu(pte);
+                            if (pte & PG_PRESENT_MASK) {
+                                print_pte(mon, (l1 << 30 ) + (l2 << 21)
+                                          + (l3 << 12),
+                                          pte & ~PG_PSE_MASK,
+                                          ~(hwaddr)0xfff);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+#ifdef TARGET_X86_64
+static void tlb_info_64(Monitor *mon, CPUArchState *env)
+{
+    uint64_t l1, l2, l3, l4;
+    uint64_t pml4e, pdpe, pde, pte;
+    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;
+
+    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+    for (l1 = 0; l1 < 512; l1++) {
+        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+        pml4e = le64_to_cpu(pml4e);
+        if (pml4e & PG_PRESENT_MASK) {
+            pdp_addr = pml4e & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+                pdpe = le64_to_cpu(pdpe);
+                if (pdpe & PG_PRESENT_MASK) {
+                    if (pdpe & PG_PSE_MASK) {
+                        /* 1G pages, CR4.PSE is ignored */
+                        print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
+                                  0x3ffffc0000000ULL);
+                    } else {
+                        pd_addr = pdpe & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+                            pde = le64_to_cpu(pde);
+                            if (pde & PG_PRESENT_MASK) {
+                                if (pde & PG_PSE_MASK) {
+                                    /* 2M pages, CR4.PSE is ignored */
+                                    print_pte(mon, (l1 << 39) + (l2 << 30) +
+                                              (l3 << 21), pde,
+                                              0x3ffffffe00000ULL);
+                                } else {
+                                    pt_addr = pde & 0x3fffffffff000ULL;
+                                    for (l4 = 0; l4 < 512; l4++) {
+                                        cpu_physical_memory_read(pt_addr
+                                                                 + l4 * 8,
+                                                                 &pte, 8);
+                                        pte = le64_to_cpu(pte);
+                                        if (pte & PG_PRESENT_MASK) {
+                                            print_pte(mon, (l1 << 39) +
+                                                      (l2 << 30) +
+                                                      (l3 << 21) + (l4 << 12),
+                                                      pte & ~PG_PSE_MASK,
+                                                      0x3fffffffff000ULL);
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+    CPUArchState *env;
+
+    env = mon_get_cpu_env();
+
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        monitor_printf(mon, "PG disabled\n");
+        return;
+    }
+    if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            tlb_info_64(mon, env);
+        } else
+#endif
+        {
+            tlb_info_pae32(mon, env);
+        }
+    } else {
+        tlb_info_32(mon, env);
+    }
+}
+
+static void mem_print(Monitor *mon, hwaddr *pstart,
+                      int *plast_prot,
+                      hwaddr end, int prot)
+{
+    int prot1;
+    prot1 = *plast_prot;
+    if (prot != prot1) {
+        if (*pstart != -1) {
+            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
+                           TARGET_FMT_plx " %c%c%c\n",
+                           *pstart, end, end - *pstart,
+                           prot1 & PG_USER_MASK ? 'u' : '-',
+                           'r',
+                           prot1 & PG_RW_MASK ? 'w' : '-');
+        }
+        if (prot != 0)
+            *pstart = end;
+        else
+            *pstart = -1;
+        *plast_prot = prot;
+    }
+}
+
+static void mem_info_32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2;
+    int prot, last_prot;
+    uint32_t pgd, pde, pte;
+    hwaddr start, end;
+
+    pgd = env->cr[3] & ~0xfff;
+    last_prot = 0;
+    start = -1;
+    for(l1 = 0; l1 < 1024; l1++) {
+        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+        pde = le32_to_cpu(pde);
+        end = l1 << 22;
+        if (pde & PG_PRESENT_MASK) {
+            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+                mem_print(mon, &start, &last_prot, end, prot);
+            } else {
+                for(l2 = 0; l2 < 1024; l2++) {
+                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+                    pte = le32_to_cpu(pte);
+                    end = (l1 << 22) + (l2 << 12);
+                    if (pte & PG_PRESENT_MASK) {
+                        prot = pte & pde &
+                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+                    } else {
+                        prot = 0;
+                    }
+                    mem_print(mon, &start, &last_prot, end, prot);
+                }
+            }
+        } else {
+            prot = 0;
+            mem_print(mon, &start, &last_prot, end, prot);
+        }
+    }
+    /* Flush last range */
+    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+static void mem_info_pae32(Monitor *mon, CPUArchState *env)
+{
+    unsigned int l1, l2, l3;
+    int prot, last_prot;
+    uint64_t pdpe, pde, pte;
+    uint64_t pdp_addr, pd_addr, pt_addr;
+    hwaddr start, end;
+
+    pdp_addr = env->cr[3] & ~0x1f;
+    last_prot = 0;
+    start = -1;
+    for (l1 = 0; l1 < 4; l1++) {
+        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+        pdpe = le64_to_cpu(pdpe);
+        end = l1 << 30;
+        if (pdpe & PG_PRESENT_MASK) {
+            pd_addr = pdpe & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+                pde = le64_to_cpu(pde);
+                end = (l1 << 30) + (l2 << 21);
+                if (pde & PG_PRESENT_MASK) {
+                    if (pde & PG_PSE_MASK) {
+                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
+                                      PG_PRESENT_MASK);
+                        mem_print(mon, &start, &last_prot, end, prot);
+                    } else {
+                        pt_addr = pde & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+                            pte = le64_to_cpu(pte);
+                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
+                            if (pte & PG_PRESENT_MASK) {
+                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
+                                                    PG_PRESENT_MASK);
+                            } else {
+                                prot = 0;
+                            }
+                            mem_print(mon, &start, &last_prot, end, prot);
+                        }
+                    }
+                } else {
+                    prot = 0;
+                    mem_print(mon, &start, &last_prot, end, prot);
+                }
+            }
+        } else {
+            prot = 0;
+            mem_print(mon, &start, &last_prot, end, prot);
+        }
+    }
+    /* Flush last range */
+    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+
+#ifdef TARGET_X86_64
+static void mem_info_64(Monitor *mon, CPUArchState *env)
+{
+    int prot, last_prot;
+    uint64_t l1, l2, l3, l4;
+    uint64_t pml4e, pdpe, pde, pte;
+    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+
+    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+    last_prot = 0;
+    start = -1;
+    for (l1 = 0; l1 < 512; l1++) {
+        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+        pml4e = le64_to_cpu(pml4e);
+        end = l1 << 39;
+        if (pml4e & PG_PRESENT_MASK) {
+            pdp_addr = pml4e & 0x3fffffffff000ULL;
+            for (l2 = 0; l2 < 512; l2++) {
+                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+                pdpe = le64_to_cpu(pdpe);
+                end = (l1 << 39) + (l2 << 30);
+                if (pdpe & PG_PRESENT_MASK) {
+                    if (pdpe & PG_PSE_MASK) {
+                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
+                                       PG_PRESENT_MASK);
+                        prot &= pml4e;
+                        mem_print(mon, &start, &last_prot, end, prot);
+                    } else {
+                        pd_addr = pdpe & 0x3fffffffff000ULL;
+                        for (l3 = 0; l3 < 512; l3++) {
+                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+                            pde = le64_to_cpu(pde);
+                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
+                            if (pde & PG_PRESENT_MASK) {
+                                if (pde & PG_PSE_MASK) {
+                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
+                                                  PG_PRESENT_MASK);
+                                    prot &= pml4e & pdpe;
+                                    mem_print(mon, &start, &last_prot, end, prot);
+                                } else {
+                                    pt_addr = pde & 0x3fffffffff000ULL;
+                                    for (l4 = 0; l4 < 512; l4++) {
+                                        cpu_physical_memory_read(pt_addr
+                                                                 + l4 * 8,
+                                                                 &pte, 8);
+                                        pte = le64_to_cpu(pte);
+                                        end = (l1 << 39) + (l2 << 30) +
+                                            (l3 << 21) + (l4 << 12);
+                                        if (pte & PG_PRESENT_MASK) {
+                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
+                                                          PG_PRESENT_MASK);
+                                            prot &= pml4e & pdpe & pde;
+                                        } else {
+                                            prot = 0;
+                                        }
+                                        mem_print(mon, &start, &last_prot, end, prot);
+                                    }
+                                }
+                            } else {
+                                prot = 0;
+                                mem_print(mon, &start, &last_prot, end, prot);
+                            }
+                        }
+                    }
+                } else {
+                    prot = 0;
+                    mem_print(mon, &start, &last_prot, end, prot);
+                }
+            }
+        } else {
+            prot = 0;
+            mem_print(mon, &start, &last_prot, end, prot);
+        }
+    }
+    /* Flush last range */
+    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_mem(Monitor *mon, const QDict *qdict)
+{
+    CPUArchState *env;
+
+    env = mon_get_cpu_env();
+
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        monitor_printf(mon, "PG disabled\n");
+        return;
+    }
+    if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            mem_info_64(mon, env);
+        } else
+#endif
+        {
+            mem_info_pae32(mon, env);
+        }
+    } else {
+        mem_info_32(mon, env);
+    }
+}
+
+void hmp_mce(Monitor *mon, const QDict *qdict)
+{
+    X86CPU *cpu;
+    CPUState *cs;
+    int cpu_index = qdict_get_int(qdict, "cpu_index");
+    int bank = qdict_get_int(qdict, "bank");
+    uint64_t status = qdict_get_int(qdict, "status");
+    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
+    uint64_t addr = qdict_get_int(qdict, "addr");
+    uint64_t misc = qdict_get_int(qdict, "misc");
+    int flags = MCE_INJECT_UNCOND_AO;
+
+    if (qdict_get_try_bool(qdict, "broadcast", false)) {
+        flags |= MCE_INJECT_BROADCAST;
+    }
+    cs = qemu_get_cpu(cpu_index);
+    if (cs != NULL) {
+        cpu = X86_CPU(cs);
+        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
+                           flags);
+    }
+}
+
+static target_long monitor_get_pc(const struct MonitorDef *md, int val)
+{
+    CPUArchState *env = mon_get_cpu_env();
+    return env->eip + env->segs[R_CS].base;
+}
+
+const MonitorDef monitor_defs[] = {
+#define SEG(name, seg) \
+    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
+    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
+    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
+
+    { "eax", offsetof(CPUX86State, regs[0]) },
+    { "ecx", offsetof(CPUX86State, regs[1]) },
+    { "edx", offsetof(CPUX86State, regs[2]) },
+    { "ebx", offsetof(CPUX86State, regs[3]) },
+    { "esp|sp", offsetof(CPUX86State, regs[4]) },
+    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
+    { "esi", offsetof(CPUX86State, regs[6]) },
+    { "edi", offsetof(CPUX86State, regs[7]) },
+#ifdef TARGET_X86_64
+    { "r8", offsetof(CPUX86State, regs[8]) },
+    { "r9", offsetof(CPUX86State, regs[9]) },
+    { "r10", offsetof(CPUX86State, regs[10]) },
+    { "r11", offsetof(CPUX86State, regs[11]) },
+    { "r12", offsetof(CPUX86State, regs[12]) },
+    { "r13", offsetof(CPUX86State, regs[13]) },
+    { "r14", offsetof(CPUX86State, regs[14]) },
+    { "r15", offsetof(CPUX86State, regs[15]) },
+#endif
+    { "eflags", offsetof(CPUX86State, eflags) },
+    { "eip", offsetof(CPUX86State, eip) },
+    SEG("cs", R_CS)
+    SEG("ds", R_DS)
+    SEG("es", R_ES)
+    SEG("ss", R_SS)
+    SEG("fs", R_FS)
+    SEG("gs", R_GS)
+    { "pc", 0, monitor_get_pc, },
+    { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+    return monitor_defs;
+}
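
Usage sketch (not part of the patch): with this series applied, the Hyper-V crash MSRs (HV_X64_MSR_CRASH_P0..P4 and HV_X64_MSR_CRASH_CTL) are only advertised to the guest when the new per-vCPU property is enabled and the host kernel lists HV_X64_MSR_CRASH_CTL among its supported MSRs. A minimal invocation might look like the following; the CPU model is illustrative and the exact option spelling can differ between QEMU versions:

    qemu-system-x86_64 -enable-kvm -cpu SandyBridge,hv-crash=on ...

When the property is set, kvm_arch_init_vcpu() turns on HV_X64_GUEST_CRASH_MSR_AVAILABLE in the HYPERV_CPUID_FEATURES leaf, and the crash parameter values are carried across migration by the "cpu/msr_hyperv_crash" subsection whenever any of them is non-zero.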