-rw-r--r--   hw/ppc/spapr_hcall.c | 60
-rw-r--r--   target-ppc/kvm_ppc.h | 36
2 files changed, 94 insertions, 2 deletions
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 6e9b6be58c..1733482de6 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -386,6 +386,65 @@ static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     return H_SUCCESS;
 }
 
+static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+                                target_ulong opcode, target_ulong *args)
+{
+    target_ulong flags = args[0];
+    hwaddr dst = args[1];
+    hwaddr src = args[2];
+    hwaddr len = TARGET_PAGE_SIZE;
+    uint8_t *pdst, *psrc;
+    target_long ret = H_SUCCESS;
+
+    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
+                  | H_COPY_PAGE | H_ZERO_PAGE)) {
+        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx "\n",
+                      flags);
+        return H_PARAMETER;
+    }
+
+    /* Map-in destination */
+    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
+        return H_PARAMETER;
+    }
+    pdst = cpu_physical_memory_map(dst, &len, 1);
+    if (!pdst || len != TARGET_PAGE_SIZE) {
+        return H_PARAMETER;
+    }
+
+    if (flags & H_COPY_PAGE) {
+        /* Map-in source, copy to destination, and unmap source again */
+        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
+            ret = H_PARAMETER;
+            goto unmap_out;
+        }
+        psrc = cpu_physical_memory_map(src, &len, 0);
+        if (!psrc || len != TARGET_PAGE_SIZE) {
+            ret = H_PARAMETER;
+            goto unmap_out;
+        }
+        memcpy(pdst, psrc, len);
+        cpu_physical_memory_unmap(psrc, len, 0, len);
+    } else if (flags & H_ZERO_PAGE) {
+        memset(pdst, 0, len);    /* Just clear the destination page */
+    }
+
+    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
+        kvmppc_dcbst_range(cpu, pdst, len);
+    }
+    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
+        if (kvm_enabled()) {
+            kvmppc_icbi_range(cpu, pdst, len);
+        } else {
+            tb_flush(CPU(cpu));
+        }
+    }
+
+unmap_out:
+    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
+    return ret;
+}
+
 #define FLAGS_REGISTER_VPA         0x0000200000000000ULL
 #define FLAGS_REGISTER_DTL         0x0000400000000000ULL
 #define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
@@ -1045,6 +1104,7 @@ static void hypercall_register_types(void)
     spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
     spapr_register_hypercall(H_SET_DABR, h_set_dabr);
     spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
+    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
     spapr_register_hypercall(H_SET_MODE, h_set_mode);
 
     /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index aaa828c055..fd64c44f4d 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -249,15 +249,47 @@ static inline int kvmppc_enable_hwrng(void)
 #endif
 
 #ifndef CONFIG_KVM
+
 #define kvmppc_eieio() do { } while (0)
-#else
+
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+#else  /* CONFIG_KVM */
+
 #define kvmppc_eieio() \
     do {                                          \
         if (kvm_enabled()) {                      \
             asm volatile("eieio" : : : "memory"); \
         } \
     } while (0)
-#endif
+
+/* Store data cache blocks back to memory */
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+    uint8_t *p;
+
+    for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
+        asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
+    }
+}
+
+/* Invalidate instruction cache blocks */
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+    uint8_t *p;
+
+    for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
+        asm volatile("icbi 0,%0" : : "r"(p));
+    }
+}
+
+#endif /* CONFIG_KVM */
 
 #ifndef KVM_INTERRUPT_SET
 #define KVM_INTERRUPT_SET -1