Diffstat (limited to 'target')
-rw-r--r--  target/i386/cpu.c           |  4
-rw-r--r--  target/i386/cpu.h           | 11
-rw-r--r--  target/i386/fpu_helper.c    | 60
-rw-r--r--  target/i386/hyperv-proto.h  |  1
-rw-r--r--  target/i386/kvm.c           | 74
-rw-r--r--  target/i386/machine.c       | 20
-rw-r--r--  target/ppc/kvm.c            |  2
-rw-r--r--  target/riscv/cpu.c          |  2
-rw-r--r--  target/riscv/cpu.h          |  7
-rw-r--r--  target/riscv/cpu_helper.c   | 28
-rw-r--r--  target/riscv/csr.c          |  5
-rw-r--r--  target/riscv/gdbstub.c      | 36
-rw-r--r--  target/riscv/pmp.c          | 13
13 files changed, 234 insertions, 29 deletions
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 0de8a22e1e..a624163ac2 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1058,7 +1058,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
         .type = CPUID_FEATURE_WORD,
         .feat_names = {
             NULL, "avx512vbmi", "umip", "pku",
-            NULL /* ospke */, NULL, "avx512vbmi2", NULL,
+            NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
             "gfni", "vaes", "vpclmulqdq", "avx512vnni",
             "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
             "la57", NULL, NULL, NULL,
@@ -6221,6 +6221,8 @@ static Property x86_cpu_properties[] = {
                       HYPERV_FEAT_IPI, 0),
     DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                       HYPERV_FEAT_STIMER_DIRECT, 0),
+    DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
+                            hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
     DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
 
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index cedb5bc205..5352c9ff55 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -24,6 +24,7 @@
 #include "cpu-qom.h"
 #include "hyperv-proto.h"
 #include "exec/cpu-defs.h"
+#include "qapi/qapi-types-common.h"
 
 /* The x86 has a strong memory model with some store-after-load re-ordering */
 #define TCG_GUEST_DEFAULT_MO      (TCG_MO_ALL & ~TCG_MO_ST_LD)
@@ -202,6 +203,7 @@ typedef enum X86Seg {
 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
 #define HF2_MPX_PR_SHIFT         5 /* BNDCFGx.BNDPRESERVE */
 #define HF2_NPT_SHIFT            6 /* Nested Paging enabled */
+#define HF2_IGNNE_SHIFT          7 /* Ignore CR0.NE=0 */
 
 #define HF2_GIF_MASK            (1 << HF2_GIF_SHIFT)
 #define HF2_HIF_MASK            (1 << HF2_HIF_SHIFT)
@@ -210,6 +212,7 @@ typedef enum X86Seg {
 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
 #define HF2_MPX_PR_MASK         (1 << HF2_MPX_PR_SHIFT)
 #define HF2_NPT_MASK            (1 << HF2_NPT_SHIFT)
+#define HF2_IGNNE_MASK          (1 << HF2_IGNNE_SHIFT)
 
 #define CR0_PE_SHIFT 0
 #define CR0_MP_SHIFT 1
@@ -451,6 +454,7 @@ typedef enum X86Seg {
 
 #define MSR_IA32_BNDCFGS                0x00000d90
 #define MSR_IA32_XSS                    0x00000da0
+#define MSR_IA32_UMWAIT_CONTROL         0xe1
 
 #define MSR_IA32_VMX_BASIC              0x00000480
 #define MSR_IA32_VMX_PINBASED_CTLS      0x00000481
@@ -730,6 +734,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
 #define CPUID_7_0_ECX_PKU               (1U << 3)
 /* OS Enable Protection Keys */
 #define CPUID_7_0_ECX_OSPKE             (1U << 4)
+/* UMONITOR/UMWAIT/TPAUSE Instructions */
+#define CPUID_7_0_ECX_WAITPKG           (1U << 5)
 /* Additional AVX-512 Vector Byte Manipulation Instruction */
 #define CPUID_7_0_ECX_AVX512_VBMI2      (1U << 6)
 /* Galois Field New Instructions */
@@ -1584,6 +1590,7 @@ typedef struct CPUX86State {
     uint16_t fpregs_format_vmstate;
 
     uint64_t xss;
+    uint32_t umwait;
 
     TPRAccess tpr_access_type;
 
@@ -1614,6 +1621,7 @@ struct X86CPU {
     bool hyperv_synic_kvm_only;
     uint64_t hyperv_features;
     bool hyperv_passthrough;
+    OnOffAuto hyperv_no_nonarch_cs;
 
     bool check_cpuid;
     bool enforce_cpuid;
@@ -1755,7 +1763,8 @@ int cpu_x86_support_mca_broadcast(CPUX86State *env);
 int cpu_get_pic_interrupt(CPUX86State *s);
 
 /* MSDOS compatibility mode FPU exception support */
-void cpu_set_ferr(CPUX86State *s);
+void x86_register_ferr_irq(qemu_irq irq);
+void cpu_set_ignne(void);
 
 /* mpx_helper.c */
 void cpu_sync_bndcs_hflags(CPUX86State *env);
diff --git a/target/i386/fpu_helper.c b/target/i386/fpu_helper.c
index 005f1f68f8..99f28f267f 100644
--- a/target/i386/fpu_helper.c
+++ b/target/i386/fpu_helper.c
@@ -26,6 +26,10 @@
 #include "exec/cpu_ldst.h"
 #include "fpu/softfloat.h"
 
+#ifdef CONFIG_SOFTMMU
+#include "hw/irq.h"
+#endif
+
 #define FPU_RC_MASK         0xc00
 #define FPU_RC_NEAR         0x000
 #define FPU_RC_DOWN         0x400
@@ -58,6 +62,36 @@
 #define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
 #define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
 
+#if !defined(CONFIG_USER_ONLY)
+static qemu_irq ferr_irq;
+
+void x86_register_ferr_irq(qemu_irq irq)
+{
+    ferr_irq = irq;
+}
+
+static void cpu_clear_ignne(void)
+{
+    CPUX86State *env = &X86_CPU(first_cpu)->env;
+    env->hflags2 &= ~HF2_IGNNE_MASK;
+}
+
+void cpu_set_ignne(void)
+{
+    CPUX86State *env = &X86_CPU(first_cpu)->env;
+    env->hflags2 |= HF2_IGNNE_MASK;
+    /*
+     * We get here in response to a write to port F0h.  The chipset should
+     * deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is
+     * cleared, because FERR# and FP_IRQ are two separate pins on real
+     * hardware.  However, we don't model FERR# as a qemu_irq, so we just
+     * do directly what the chipset would do, i.e. deassert FP_IRQ.
+     */
+    qemu_irq_lower(ferr_irq);
+}
+#endif
+
+
 static inline void fpush(CPUX86State *env)
 {
     env->fpstt = (env->fpstt - 1) & 7;
@@ -136,8 +170,8 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
         raise_exception_ra(env, EXCP10_COPR, retaddr);
     }
 #if !defined(CONFIG_USER_ONLY)
-    else {
-        cpu_set_ferr(env);
+    else if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
+        qemu_irq_raise(ferr_irq);
     }
 #endif
 }
@@ -1029,6 +1063,22 @@ void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32)
     do_fstenv(env, ptr, data32, GETPC());
 }
 
+static void cpu_set_fpus(CPUX86State *env, uint16_t fpus)
+{
+    env->fpstt = (fpus >> 11) & 7;
+    env->fpus = fpus & ~0x3800 & ~FPUS_B;
+    env->fpus |= env->fpus & FPUS_SE ? FPUS_B : 0;
+#if !defined(CONFIG_USER_ONLY)
+    if (!(env->fpus & FPUS_SE)) {
+        /*
+         * Here the processor deasserts FERR#; in response, the chipset deasserts
+         * IGNNE#.
+         */
+        cpu_clear_ignne();
+    }
+#endif
+}
+
 static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32,
                       uintptr_t retaddr)
 {
@@ -1043,8 +1093,7 @@ static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32,
         fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr);
         fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr);
     }
-    env->fpstt = (fpus >> 11) & 7;
-    env->fpus = fpus & ~0x3800;
+    cpu_set_fpus(env, fpus);
     for (i = 0; i < 8; i++) {
         env->fptags[i] = ((fptag & 3) == 3);
         fptag >>= 2;
@@ -1292,8 +1341,7 @@ static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
     fpus = cpu_lduw_data_ra(env, ptr + XO(legacy.fsw), ra);
     fptag = cpu_lduw_data_ra(env, ptr + XO(legacy.ftw), ra);
     cpu_set_fpuc(env, fpuc);
-    env->fpstt = (fpus >> 11) & 7;
-    env->fpus = fpus & ~0x3800;
+    cpu_set_fpus(env, fpus);
     fptag ^= 0xff;
     for (i = 0; i < 8; i++) {
         env->fptags[i] = ((fptag >> i) & 1);
diff --git a/target/i386/hyperv-proto.h b/target/i386/hyperv-proto.h
index cffac10b45..056a305be3 100644
--- a/target/i386/hyperv-proto.h
+++ b/target/i386/hyperv-proto.h
@@ -63,6 +63,7 @@
 #define HV_CLUSTER_IPI_RECOMMENDED          (1u << 10)
 #define HV_EX_PROCESSOR_MASKS_RECOMMENDED   (1u << 11)
 #define HV_ENLIGHTENED_VMCS_RECOMMENDED     (1u << 14)
+#define HV_NO_NONARCH_CORESHARING           (1u << 18)
 
 /*
  * Basic virtualized MSRs
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 8c73438c67..bfd09bd441 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -95,6 +95,7 @@ static bool has_msr_hv_stimer;
 static bool has_msr_hv_frequencies;
 static bool has_msr_hv_reenlightenment;
 static bool has_msr_xss;
+static bool has_msr_umwait;
 static bool has_msr_spec_ctrl;
 static bool has_msr_virt_ssbd;
 static bool has_msr_smi_count;
@@ -401,6 +402,12 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
         if (host_tsx_blacklisted()) {
             ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
         }
+    } else if (function == 7 && index == 0 && reg == R_ECX) {
+        if (enable_cpu_pm) {
+            ret |= CPUID_7_0_ECX_WAITPKG;
+        } else {
+            ret &= ~CPUID_7_0_ECX_WAITPKG;
+        }
     } else if (function == 7 && index == 0 && reg == R_EDX) {
         /*
          * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
@@ -592,9 +599,9 @@ static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
                        (MCM_ADDR_PHYS << 6) | 0xc, flags);
 }
 
-static void hardware_memory_error(void)
+static void hardware_memory_error(void *host_addr)
 {
-    fprintf(stderr, "Hardware memory error!\n");
+    error_report("QEMU got Hardware memory error at addr %p", host_addr);
     exit(1);
 }
 
@@ -618,15 +625,34 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
             kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
             kvm_hwpoison_page_add(ram_addr);
             kvm_mce_inject(cpu, paddr, code);
+
+            /*
+             * Use different logging severity based on error type.
+             * If there is additional MCE reporting on the hypervisor, QEMU VA
+             * could be another source to identify the PA and MCE details.
+             */
+            if (code == BUS_MCEERR_AR) {
+                error_report("Guest MCE Memory Error at QEMU addr %p and "
+                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
+                    addr, paddr, "BUS_MCEERR_AR");
+            } else {
+                warn_report("Guest MCE Memory Error at QEMU addr %p and "
+                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
+                    addr, paddr, "BUS_MCEERR_AO");
+            }
+
             return;
         }
 
-        fprintf(stderr, "Hardware memory error for memory used by "
-                "QEMU itself instead of guest system!\n");
+        if (code == BUS_MCEERR_AO) {
+            warn_report("Hardware memory error at addr %p of type %s "
+                "for memory used by QEMU itself instead of guest system!",
+                addr, "BUS_MCEERR_AO");
+        }
     }
 
     if (code == BUS_MCEERR_AR) {
-        hardware_memory_error();
+        hardware_memory_error(addr);
     }
 
     /* Hope we are lucky for AO MCE */
@@ -1208,6 +1234,16 @@ static int hyperv_handle_properties(CPUState *cs,
         }
     }
 
+    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
+        env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
+    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
+        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
+        if (c) {
+            env->features[FEAT_HV_RECOMM_EAX] |=
+                c->eax & HV_NO_NONARCH_CORESHARING;
+        }
+    }
+
     /* Features */
     r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
     r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
@@ -1321,6 +1357,7 @@ free:
 }
 
 static Error *hv_passthrough_mig_blocker;
+static Error *hv_no_nonarch_cs_mig_blocker;
 
 static int hyperv_init_vcpu(X86CPU *cpu)
 {
@@ -1340,6 +1377,21 @@ static int hyperv_init_vcpu(X86CPU *cpu)
         }
     }
 
+    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
+        hv_no_nonarch_cs_mig_blocker == NULL) {
+        error_setg(&hv_no_nonarch_cs_mig_blocker,
+                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
+                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
+                   " make sure SMT is disabled and/or that vCPUs are properly"
+                   " pinned)");
+        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
+        if (local_err) {
+            error_report_err(local_err);
+            error_free(hv_no_nonarch_cs_mig_blocker);
+            return ret;
+        }
+    }
+
     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
         /*
          * the kernel doesn't support setting vp_index; assert that its value
@@ -1954,6 +2006,9 @@ static int kvm_get_supported_msrs(KVMState *s)
             case MSR_IA32_XSS:
                 has_msr_xss = true;
                 break;
+            case MSR_IA32_UMWAIT_CONTROL:
+                has_msr_umwait = true;
+                break;
             case HV_X64_MSR_CRASH_CTL:
                 has_msr_hv_crash = true;
                 break;
@@ -2633,6 +2688,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         if (has_msr_xss) {
            kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
         }
+        if (has_msr_umwait) {
+            kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
+        }
         if (has_msr_spec_ctrl) {
             kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
         }
@@ -3046,6 +3104,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_xss) {
         kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
     }
+    if (has_msr_umwait) {
+        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
+    }
     if (has_msr_spec_ctrl) {
         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
     }
@@ -3298,6 +3359,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_XSS:
             env->xss = msrs[i].data;
             break;
+        case MSR_IA32_UMWAIT_CONTROL:
+            env->umwait = msrs[i].data;
+            break;
         default:
             if (msrs[i].index >= MSR_MC0_CTL &&
                 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 2767b3096d..6481f846f6 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -943,6 +943,25 @@ static const VMStateDescription vmstate_xss = {
     }
 };
 
+static bool umwait_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->umwait != 0;
+}
+
+static const VMStateDescription vmstate_umwait = {
+    .name = "cpu/umwait",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = umwait_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(env.umwait, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 #ifdef TARGET_X86_64
 static bool pkru_needed(void *opaque)
 {
@@ -1391,6 +1410,7 @@ VMStateDescription vmstate_x86_cpu = {
         &vmstate_msr_hyperv_reenlightenment,
         &vmstate_avx512,
         &vmstate_xss,
+        &vmstate_umwait,
         &vmstate_tsc_khz,
         &vmstate_msr_smi_count,
 #ifdef TARGET_X86_64
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 820724cc7d..7d2e8969ac 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -411,7 +411,7 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
          * will be a normal mapping, not a special hugepage one used
          * for RAM.
          */
-        if (getpagesize() < 0x10000) {
+        if (qemu_real_host_page_size < 0x10000) {
             error_setg(errp,
                        "KVM can't supply 64kiB CI pages, which guest expects");
         }
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index f13e298a36..3939963b71 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -484,7 +484,7 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
     cc->gdb_stop_before_watchpoint = true;
     cc->disas_set_info = riscv_cpu_disas_set_info;
 #ifndef CONFIG_USER_ONLY
-    cc->do_unassigned_access = riscv_cpu_unassigned_access;
+    cc->do_transaction_failed = riscv_cpu_do_transaction_failed;
     cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
     cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
 #endif
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 124ed33ee4..8c64c68538 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -264,8 +264,11 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                         MMUAccessType access_type, int mmu_idx,
                         bool probe, uintptr_t retaddr);
-void riscv_cpu_unassigned_access(CPUState *cpu, hwaddr addr, bool is_write,
-                                 bool is_exec, int unused, unsigned size);
+void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+                                     vaddr addr, unsigned size,
+                                     MMUAccessType access_type,
+                                     int mmu_idx, MemTxAttrs attrs,
+                                     MemTxResult response, uintptr_t retaddr);
 char *riscv_isa_string(RISCVCPU *cpu);
 void riscv_cpu_list(void);
 
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 87dd6a6ece..f13131a51b 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -169,7 +169,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
     /* NOTE: the env->pc value visible here will not be
      * correct, but the value visible to the exception handler
      * (riscv_cpu_do_interrupt) is correct */
-
+    MemTxResult res;
+    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
     int mode = mmu_idx;
 
     if (mode == PRV_M && access_type != MMU_INST_FETCH) {
@@ -256,11 +257,16 @@ restart:
                                    1 << MMU_DATA_LOAD, PRV_S)) {
             return TRANSLATE_PMP_FAIL;
         }
+
 #if defined(TARGET_RISCV32)
-        target_ulong pte = ldl_phys(cs->as, pte_addr);
+        target_ulong pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
 #elif defined(TARGET_RISCV64)
-        target_ulong pte = ldq_phys(cs->as, pte_addr);
+        target_ulong pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
 #endif
+        if (res != MEMTX_OK) {
+            return TRANSLATE_FAIL;
+        }
+
         hwaddr ppn = pte >> PTE_PPN_SHIFT;
 
         if (!(pte & PTE_V)) {
@@ -402,20 +408,23 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     return phys_addr;
 }
 
-void riscv_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
-                                 bool is_exec, int unused, unsigned size)
+void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+                                     vaddr addr, unsigned size,
+                                     MMUAccessType access_type,
+                                     int mmu_idx, MemTxAttrs attrs,
+                                     MemTxResult response, uintptr_t retaddr)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
     CPURISCVState *env = &cpu->env;
 
-    if (is_write) {
+    if (access_type == MMU_DATA_STORE) {
         cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
     } else {
         cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
     }
 
     env->badaddr = addr;
-    riscv_raise_exception(&cpu->env, cs->exception_index, GETPC());
+    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
 }
 
 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
@@ -446,9 +455,9 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                         MMUAccessType access_type, int mmu_idx,
                         bool probe, uintptr_t retaddr)
 {
-#ifndef CONFIG_USER_ONLY
     RISCVCPU *cpu = RISCV_CPU(cs);
     CPURISCVState *env = &cpu->env;
+#ifndef CONFIG_USER_ONLY
     hwaddr pa = 0;
     int prot;
     bool pmp_violation = false;
@@ -499,7 +508,10 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     case MMU_DATA_STORE:
         cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
         break;
+    default:
+        g_assert_not_reached();
     }
+    env->badaddr = address;
     cpu_loop_exit_restore(cs, retaddr);
 #endif
 }
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index f767ad24be..974c9c20b5 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -801,7 +801,10 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value,
 #if !defined(CONFIG_USER_ONLY)
     int csr_priv = get_field(csrno, 0x300);
     int read_only = get_field(csrno, 0xC00) == 3;
-    if ((write_mask && read_only) || (env->priv < csr_priv)) {
+    if ((!env->debugger) && (env->priv < csr_priv)) {
+        return -1;
+    }
+    if (write_mask && read_only) {
         return -1;
     }
 #endif
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index ded140e8d8..1a7947e019 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -373,6 +373,32 @@ static int riscv_gdb_set_csr(CPURISCVState *env, uint8_t *mem_buf, int n)
     return 0;
 }
 
+static int riscv_gdb_get_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n)
+{
+    if (n == 0) {
+#ifdef CONFIG_USER_ONLY
+        return gdb_get_regl(mem_buf, 0);
+#else
+        return gdb_get_regl(mem_buf, cs->priv);
+#endif
+    }
+    return 0;
+}
+
+static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n)
+{
+    if (n == 0) {
+#ifndef CONFIG_USER_ONLY
+        cs->priv = ldtul_p(mem_buf) & 0x3;
+        if (cs->priv == PRV_H) {
+            cs->priv = PRV_S;
+        }
+#endif
+        return sizeof(target_ulong);
+    }
+    return 0;
+}
+
 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
@@ -384,7 +410,10 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
     }
 
     gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr,
-                             4096, "riscv-32bit-csr.xml", 0);
+                             240, "riscv-32bit-csr.xml", 0);
+
+    gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual,
+                             1, "riscv-32bit-virtual.xml", 0);
 #elif defined(TARGET_RISCV64)
     if (env->misa & RVF) {
         gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu,
@@ -392,6 +421,9 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
     }
 
     gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr,
-                             4096, "riscv-64bit-csr.xml", 0);
+                             240, "riscv-64bit-csr.xml", 0);
+
+    gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual,
+                             1, "riscv-64bit-virtual.xml", 0);
 #endif
 }
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index d4f1007109..0e6b640fbd 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -223,6 +223,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
 {
     int i = 0;
     int ret = -1;
+    int pmp_size = 0;
     target_ulong s = 0;
     target_ulong e = 0;
     pmp_priv_t allowed_privs = 0;
@@ -232,11 +233,21 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
         return true;
     }
 
+    /*
+     * if size is unknown (0), assume that all bytes
+     * from addr to the end of the page will be accessed.
+     */
+    if (size == 0) {
+        pmp_size = -(addr | TARGET_PAGE_MASK);
+    } else {
+        pmp_size = size;
+    }
+
     /* 1.10 draft priv spec states there is an implicit order
      * from low to high */
     for (i = 0; i < MAX_RISCV_PMPS; i++) {
         s = pmp_is_in_range(env, i, addr);
-        e = pmp_is_in_range(env, i, addr + size - 1);
+        e = pmp_is_in_range(env, i, addr + pmp_size - 1);
 
         /* partially inside */
         if ((s + e) == 1) {