Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu.h           3
-rw-r--r--  target/arm/helper.c       13
-rw-r--r--  target/arm/kvm.c          51
-rw-r--r--  target/ppc/excp_helper.c   1
-rw-r--r--  target/ppc/kvm.c           5
-rw-r--r--  target/ppc/mmu-radix64.c  67
-rw-r--r--  target/ppc/mmu-radix64.h   1
-rw-r--r--  target/ppc/mmu_helper.c    3
8 files changed, 125 insertions(+), 19 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 16a1e59615..102c58afac 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -706,6 +706,9 @@ struct ARMCPU {
void *el_change_hook_opaque;
int32_t node_id; /* NUMA node this CPU belongs to */
+
+ /* Used to synchronize KVM and QEMU in-kernel device levels */
+ uint8_t device_irq_level;
};
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 2594faa9b8..4ed32c56b8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -8768,9 +8768,16 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
}
break;
case 20: /* CONTROL */
- switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
- env->v7m.control = val & (R_V7M_CONTROL_SPSEL_MASK |
- R_V7M_CONTROL_NPRIV_MASK);
+ /* Writing to the SPSEL bit only has an effect if we are in
+ * thread mode; other bits can be updated by any privileged code.
+ * switch_v7m_sp() deals with updating the SPSEL bit in
+ * env->v7m.control, so we only need to update the others.
+ */
+ if (env->v7m.exception == 0) {
+ switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
+ }
+ env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control |= val & R_V7M_CONTROL_NPRIV_MASK;
break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
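
Note: the CONTROL write above is a masked read-modify-write: clear the one field this path owns, then OR in the new value, leaving SPSEL entirely to switch_v7m_sp(). A minimal standalone sketch of that pattern follows; the bit positions match the v7-M CONTROL register, but update_control() is a name invented here for illustration.

#include <stdint.h>
#include <stdio.h>

#define CONTROL_SPSEL_MASK (1u << 1)   /* stack pointer select */
#define CONTROL_NPRIV_MASK (1u << 0)   /* not-privileged */

/* Rewrite only the nPRIV field of a shadowed CONTROL value; SPSEL is
 * owned by the stack-switch helper, so unrelated bits must survive. */
static uint32_t update_control(uint32_t control, uint32_t val)
{
    control &= ~CONTROL_NPRIV_MASK;
    control |= val & CONTROL_NPRIV_MASK;
    return control;
}

int main(void)
{
    printf("0x%x\n", update_control(0x2u, 0x1u));   /* prints 0x3 */
    return 0;
}
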
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 45554682f2..7c17f0d629 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -174,6 +174,12 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
*/
kvm_async_interrupts_allowed = true;
+ /*
+ * PSCI wakes up secondary cores, so we always need to
+ * have vCPUs waiting in kernel space
+ */
+ kvm_halt_in_kernel_allowed = true;
+
cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
type_register_static(&host_arm_cpu_type_info);
@@ -528,6 +534,51 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
+ ARMCPU *cpu;
+ uint32_t switched_level;
+
+ if (kvm_irqchip_in_kernel()) {
+ /*
+ * We only need to sync timer states with user-space interrupt
+ * controllers, so return early and save cycles if we don't.
+ */
+ return MEMTXATTRS_UNSPECIFIED;
+ }
+
+ cpu = ARM_CPU(cs);
+
+ /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
+ if (run->s.regs.device_irq_level != cpu->device_irq_level) {
+ switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;
+
+ qemu_mutex_lock_iothread();
+
+ if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
+ qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
+ !!(run->s.regs.device_irq_level &
+ KVM_ARM_DEV_EL1_VTIMER));
+ switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
+ }
+
+ if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
+ qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
+ !!(run->s.regs.device_irq_level &
+ KVM_ARM_DEV_EL1_PTIMER));
+ switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
+ }
+
+ /* XXX PMU IRQ is missing */
+
+ if (switched_level) {
+ qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
+ __func__, switched_level);
+ }
+
+ /* We also mark unknown levels as processed so as not to waste cycles */
+ cpu->device_irq_level = run->s.regs.device_irq_level;
+ qemu_mutex_unlock_iothread();
+ }
+
return MEMTXATTRS_UNSPECIFIED;
}
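
Note: the synchronization above hinges on one XOR: the shadowed level word XORed with the fresh word from the kernel yields set bits exactly where a line changed since the last exit, so unchanged lines cost nothing. A self-contained sketch of that edge-detection idea, with made-up line bits standing in for the kernel's KVM_ARM_DEV_* flags:

#include <stdint.h>
#include <stdio.h>

enum { DEV_VTIMER = 1u << 0, DEV_PTIMER = 1u << 1 };   /* stand-ins */

/* Forward only the lines whose level actually changed, then remember
 * the fresh word so the next call starts from the right baseline. */
static void sync_levels(uint8_t *shadow, uint8_t fresh)
{
    uint8_t switched = *shadow ^ fresh;   /* set bits = changed lines */

    if (switched & DEV_VTIMER) {
        printf("vtimer line -> %d\n", !!(fresh & DEV_VTIMER));
    }
    if (switched & DEV_PTIMER) {
        printf("ptimer line -> %d\n", !!(fresh & DEV_PTIMER));
    }
    *shadow = fresh;
}

int main(void)
{
    uint8_t shadow = 0;
    sync_levels(&shadow, DEV_VTIMER);                  /* vtimer 0 -> 1 */
    sync_levels(&shadow, DEV_VTIMER | DEV_PTIMER);     /* ptimer 0 -> 1 */
    return 0;
}
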
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 3a9f0861e7..e6009e70e5 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -283,6 +283,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
* precise in the MSR.
*/
msr |= 0x00100000;
+ env->spr[SPR_BOOKE_ESR] = ESR_FP;
break;
case POWERPC_EXCP_INVAL:
LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
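
Note: the added line records the cause of the FP-unavailable interrupt in the BookE Exception Syndrome Register before the exception is delivered, so the guest handler can see why it was entered. A toy sketch of that "set the syndrome, then vector" ordering; the bit value and struct are stand-ins, not the real BookE layout.

#include <stdint.h>
#include <stdio.h>

#define TOY_ESR_FP (1u << 24)   /* stand-in; the real ESR_FP is arch-defined */

struct toy_env {
    uint32_t esr;   /* exception syndrome register */
    uint32_t msr;
};

/* Record the syndrome first, then adjust MSR state, mirroring the
 * ordering in powerpc_excp() above. */
static void raise_fp_unavailable(struct toy_env *env)
{
    env->esr = TOY_ESR_FP;
    env->msr |= 0x00100000;
}

int main(void)
{
    struct toy_env env = { 0, 0 };
    raise_fp_unavailable(&env);
    printf("esr=0x%x msr=0x%x\n", env.esr, env.msr);
    return 0;
}
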
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index f2f7c531bc..f7a7ea5858 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2445,6 +2445,7 @@ static int kvm_ppc_register_host_cpu_type(void)
.class_init = kvmppc_host_cpu_class_init,
};
PowerPCCPUClass *pvr_pcc;
+ ObjectClass *oc;
DeviceClass *dc;
int i;
@@ -2455,6 +2456,9 @@ static int kvm_ppc_register_host_cpu_type(void)
type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
type_register(&type_info);
+ oc = object_class_by_name(type_info.name);
+ g_assert(oc);
+
#if defined(TARGET_PPC64)
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
type_info.parent = TYPE_SPAPR_CPU_CORE;
@@ -2474,7 +2478,6 @@ static int kvm_ppc_register_host_cpu_type(void)
dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
- ObjectClass *oc = OBJECT_CLASS(pvr_pcc);
char *suffix;
ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 69fde65276..bbd37e3c7d 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -147,11 +147,10 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
}
}
-static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
+static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
uint64_t base_addr, uint64_t nls,
hwaddr *raddr, int *psize,
- int *fault_cause, int *prot,
- hwaddr *pte_addr)
+ int *fault_cause, hwaddr *pte_addr)
{
CPUState *cs = CPU(cpu);
uint64_t index, pde;
@@ -177,10 +176,6 @@ static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
uint64_t rpn = pde & R_PTE_RPN;
uint64_t mask = (1UL << *psize) - 1;
- if (ppc_radix64_check_prot(cpu, rwx, pde, fault_cause, prot)) {
- return 0; /* Protection Denied Access */
- }
-
/* Or high bits of rpn and low bits of ea to form whole real addr */
*raddr = (rpn & ~mask) | (eaddr & mask);
*pte_addr = base_addr + (index * sizeof(pde));
@@ -188,9 +183,8 @@ static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
}
/* Next Level of Radix Tree */
- return ppc_radix64_walk_tree(cpu, rwx, eaddr, pde & R_PDE_NLB,
- pde & R_PDE_NLS, raddr, psize,
- fault_cause, prot, pte_addr);
+ return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
+ raddr, psize, fault_cause, pte_addr);
}
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
@@ -241,11 +235,11 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* Walk Radix Tree from Process Table Entry to Convert EA to RA */
page_size = PRTBE_R_GET_RTS(prtbe0);
- pte = ppc_radix64_walk_tree(cpu, rwx, eaddr & R_EADDR_MASK,
+ pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
- &raddr, &page_size, &fault_cause, &prot,
- &pte_addr);
- if (!pte) {
+ &raddr, &page_size, &fault_cause, &pte_addr);
+ if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
+ /* Couldn't get pte or access denied due to protection */
ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
return 1;
}
@@ -257,3 +251,48 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
prot, mmu_idx, 1UL << page_size);
return 0;
}
+
+hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ hwaddr raddr, pte_addr;
+ uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
+ int page_size, fault_cause = 0;
+
+ /* Handle Real Mode */
+ if (msr_dr == 0) {
+ /* In real mode the top 4 effective addr bits are (mostly) ignored */
+ return eaddr & 0x0FFFFFFFFFFFFFFFULL;
+ }
+
+ /* Virtual Mode Access - get the fully qualified address */
+ if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
+ return -1;
+ }
+
+ /* Get Process Table */
+ patbe = vhc->get_patbe(cpu->vhyp);
+
+ /* Index Process Table by PID to Find Corresponding Process Table Entry */
+ offset = pid * sizeof(struct prtb_entry);
+ size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
+ if (offset >= size) {
+ /* offset exceeds size of the process table */
+ return -1;
+ }
+ prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);
+
+ /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
+ page_size = PRTBE_R_GET_RTS(prtbe0);
+ pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
+ prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
+ &raddr, &page_size, &fault_cause, &pte_addr);
+ if (!pte) {
+ return -1;
+ }
+
+ return raddr & TARGET_PAGE_MASK;
+}
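
Note: at a leaf, ppc_radix64_walk_tree() forms the real address by taking the page-frame bits from the PTE's real page number and the in-page offset bits from the effective address. A small sketch of that composition; compose_raddr() is an illustrative name, not a QEMU function.

#include <stdint.h>
#include <stdio.h>

static uint64_t compose_raddr(uint64_t rpn, uint64_t eaddr, int psize)
{
    uint64_t mask = (1ULL << psize) - 1;   /* in-page offset bits */

    /* High bits from the real page number, low bits from the EA. */
    return (rpn & ~mask) | (eaddr & mask);
}

int main(void)
{
    /* 64 KiB page: the 0xabcd offset survives, the frame comes from
     * the PTE, giving 0x12345abcd. */
    printf("0x%llx\n",
           (unsigned long long)compose_raddr(0x123450000ULL,
                                             0xdeadabcdULL, 16));
    return 0;
}
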
diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
index 1d5c7cfea5..0ecf063a17 100644
--- a/target/ppc/mmu-radix64.h
+++ b/target/ppc/mmu-radix64.h
@@ -46,6 +46,7 @@
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
int mmu_idx);
+hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
{
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 65d1c8692d..b7b9088842 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -30,6 +30,7 @@
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "mmu-book3s-v3.h"
+#include "mmu-radix64.h"
//#define DEBUG_MMU
//#define DEBUG_BATS
@@ -1432,7 +1433,7 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return ppc_hash64_get_phys_page_debug(cpu, addr);
case POWERPC_MMU_VER_3_00:
if (ppc64_radix_guest(ppc_env_get_cpu(env))) {
- /* TODO - Unsupported */
+ return ppc_radix64_get_phys_page_debug(cpu, addr);
} else {
return ppc_hash64_get_phys_page_debug(cpu, addr);
}
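
Note on the debug hook wired up here: get_phys_page_debug implementations must not raise guest-visible faults, and they signal failure by returning -1, which callers such as the gdbstub treat as "no mapping". A hedged sketch of that caller contract; debug_translate() and its scaffolding are invented, and only the real-mode mask mirrors the new function.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;

/* Debug translation: never fault the guest; -1 means no valid mapping.
 * In real mode the top 4 EA bits are simply masked off, as in
 * ppc_radix64_get_phys_page_debug() above. */
static hwaddr debug_translate(uint64_t eaddr, int real_mode)
{
    if (real_mode) {
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }
    return (hwaddr)-1;   /* stand-in for a failed radix walk */
}

int main(void)
{
    hwaddr pa = debug_translate(0xC000000000001000ULL, 1);

    if (pa == (hwaddr)-1) {
        printf("no mapping\n");
    } else {
        printf("0x%llx\n", (unsigned long long)pa);
    }
    return 0;
}
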