Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/cpu.h               1
-rw-r--r--  target-ppc/int_helper.c      127
-rw-r--r--  target-ppc/kvm.c             139
-rw-r--r--  target-ppc/kvm_ppc.h          19
-rw-r--r--  target-ppc/machine.c           4
-rw-r--r--  target-ppc/translate_init.c    8
6 files changed, 152 insertions, 146 deletions
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index ca2fc2198e..faf4404078 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1079,7 +1079,6 @@ struct CPUPPCState {
int mmu_idx; /* precomputed MMU index to speed up mem accesses */
/* Power management */
- int power_mode;
int (*check_pow)(CPUPPCState *env);
#if !defined(CONFIG_USER_ONLY)
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index f638b2a07c..f39b4f682a 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -287,23 +287,6 @@ target_ulong helper_602_mfrom(target_ulong arg)
for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
-/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
- * execute the following block. */
-#define DO_HANDLE_NAN(result, x) \
- if (float32_is_any_nan(x)) { \
- CPU_FloatU __f; \
- __f.f = x; \
- __f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
- result = __f.f; \
- } else
-
-#define HANDLE_NAN1(result, x) \
- DO_HANDLE_NAN(result, x)
-#define HANDLE_NAN2(result, x, y) \
- DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
-#define HANDLE_NAN3(result, x, y, z) \
- DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
-
/* Saturating arithmetic helpers. */
#define SATCVT(from, to, from_type, to_type, min, max) \
static inline to_type cvt##from##to(from_type x, int *sat) \
@@ -409,15 +392,29 @@ VARITH(uwm, u32)
int i; \
\
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
- HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
- r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
- } \
+ r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
} \
}
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
+VARITHFP(minfp, float32_min)
+VARITHFP(maxfp, float32_max)
#undef VARITHFP
+#define VARITHFPFMA(suffix, type) \
+ void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+ ppc_avr_t *b, ppc_avr_t *c) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i], \
+ type, &env->vec_status); \
+ } \
+ }
+VARITHFPFMA(maddfp, 0);
+VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
+#undef VARITHFPFMA
+
#define VARITHSAT_CASE(type, op, cvt, element) \
{ \
type result = (type)a->element[i] op (type)b->element[i]; \
@@ -649,27 +646,6 @@ VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
-void helper_vmaddfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
- ppc_avr_t *c)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(r->f); i++) {
- HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
- /* Need to do the computation in higher precision and round
- * once at the end. */
- float64 af, bf, cf, t;
-
- af = float32_to_float64(a->f[i], &env->vec_status);
- bf = float32_to_float64(b->f[i], &env->vec_status);
- cf = float32_to_float64(c->f[i], &env->vec_status);
- t = float64_mul(af, cf, &env->vec_status);
- t = float64_add(t, bf, &env->vec_status);
- r->f[i] = float64_to_float32(t, &env->vec_status);
- }
- }
-}
-
void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
ppc_avr_t *b, ppc_avr_t *c)
{
@@ -730,27 +706,6 @@ VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
-#define VMINMAXFP(suffix, rT, rF) \
- void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
- ppc_avr_t *b) \
- { \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
- HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
- if (float32_lt_quiet(a->f[i], b->f[i], \
- &env->vec_status)) { \
- r->f[i] = rT->f[i]; \
- } else { \
- r->f[i] = rF->f[i]; \
- } \
- } \
- } \
- }
-VMINMAXFP(minfp, a, b)
-VMINMAXFP(maxfp, b, a)
-#undef VMINMAXFP
-
void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
int i;
@@ -930,28 +885,6 @@ VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
-void helper_vnmsubfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
- ppc_avr_t *b, ppc_avr_t *c)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(r->f); i++) {
- HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
- /* Need to do the computation is higher precision and round
- * once at the end. */
- float64 af, bf, cf, t;
-
- af = float32_to_float64(a->f[i], &env->vec_status);
- bf = float32_to_float64(b->f[i], &env->vec_status);
- cf = float32_to_float64(c->f[i], &env->vec_status);
- t = float64_mul(af, cf, &env->vec_status);
- t = float64_sub(t, bf, &env->vec_status);
- t = float64_chs(t);
- r->f[i] = float64_to_float32(t, &env->vec_status);
- }
- }
-}
-
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
ppc_avr_t *c)
{
@@ -1039,9 +972,7 @@ void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
int i;
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
- HANDLE_NAN1(r->f[i], b->f[i]) {
- r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
- }
+ r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
}
}
@@ -1054,9 +985,7 @@ void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
\
set_float_rounding_mode(rounding, &s); \
for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
- HANDLE_NAN1(r->f[i], b->f[i]) { \
- r->f[i] = float32_round_to_int (b->f[i], &s); \
- } \
+ r->f[i] = float32_round_to_int (b->f[i], &s); \
} \
}
VRFI(n, float_round_nearest_even)
@@ -1089,11 +1018,9 @@ void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
int i;
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
- HANDLE_NAN1(r->f[i], b->f[i]) {
- float32 t = float32_sqrt(b->f[i], &env->vec_status);
+ float32 t = float32_sqrt(b->f[i], &env->vec_status);
- r->f[i] = float32_div(float32_one, t, &env->vec_status);
- }
+ r->f[i] = float32_div(float32_one, t, &env->vec_status);
}
}
@@ -1109,9 +1036,7 @@ void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
int i;
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
- HANDLE_NAN1(r->f[i], b->f[i]) {
- r->f[i] = float32_exp2(b->f[i], &env->vec_status);
- }
+ r->f[i] = float32_exp2(b->f[i], &env->vec_status);
}
}
@@ -1120,9 +1045,7 @@ void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
int i;
for (i = 0; i < ARRAY_SIZE(r->f); i++) {
- HANDLE_NAN1(r->f[i], b->f[i]) {
- r->f[i] = float32_log2(b->f[i], &env->vec_status);
- }
+ r->f[i] = float32_log2(b->f[i], &env->vec_status);
}
}
@@ -1473,10 +1396,6 @@ VUPK(lsh, s32, s16, UPKLO)
#undef UPKHI
#undef UPKLO
-#undef DO_HANDLE_NAN
-#undef HANDLE_NAN1
-#undef HANDLE_NAN2
-#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX
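
The int_helper.c hunks above replace the open-coded widen/multiply/add sequence with a single call to softfloat's float32_muladd(), selecting the vnmsubfp behaviour through the float_muladd_negate_c (negate the addend) and float_muladd_negate_result (negate the final result) flags. The following standalone sketch is only an illustration of that sign identity, not QEMU code: fmaf() from <math.h> stands in for float32_muladd(), and the function names are made up here.

/* Illustration only: -((a*c) - b) == -((a*c) + (-b)), which is why the
 * patch can express vnmsubfp as one fused multiply-add with the
 * "negate addend" and "negate result" flags. */
#include <math.h>
#include <stdio.h>

static float nmsub_open_coded(float a, float b, float c)
{
    /* shape of the removed helper: widen, multiply, subtract, negate,
     * round once at the end */
    double t = (double)a * (double)c;
    t = t - (double)b;
    return (float)-t;
}

static float nmsub_fused(float a, float b, float c)
{
    /* shape of the new helper: one fused op, addend and result negated */
    return -fmaf(a, c, -b);
}

int main(void)
{
    float a = 1.5f, b = 0.25f, c = 2.0f;
    printf("%g %g\n", nmsub_open_coded(a, b, c), nmsub_fused(a, b, c));
    return 0;
}
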
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 7f6e4e0b87..5cbe98a164 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -60,6 +60,7 @@ static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;
+static int cap_hior;
/* XXX We have a race condition where we actually have a level triggered
* interrupt, but the infrastructure can't expose that yet, so the guest
@@ -86,6 +87,7 @@ int kvm_arch_init(KVMState *s)
cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
+ cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
if (!cap_interrupt_level) {
fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
@@ -469,6 +471,54 @@ int kvm_arch_put_registers(CPUPPCState *env, int level)
env->tlb_dirty = false;
}
+ if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
+ struct kvm_sregs sregs;
+
+ sregs.pvr = env->spr[SPR_PVR];
+
+ sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ for (i = 0; i < 64; i++) {
+ sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+ sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ sregs.u.s.ppc32.sr[i] = env->sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ /* Beware. We have to swap upper and lower bits here */
+ sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
+ | env->DBAT[1][i];
+ sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
+ | env->IBAT[1][i];
+ }
+
+ ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
+ uint64_t hior = env->spr[SPR_HIOR];
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_HIOR,
+ .addr = (uintptr_t) &hior,
+ };
+
+ ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
return ret;
}
@@ -946,52 +996,14 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
void kvmppc_set_papr(CPUPPCState *env)
{
struct kvm_enable_cap cap = {};
- struct kvm_one_reg reg = {};
- struct kvm_sregs sregs = {};
int ret;
- uint64_t hior = env->spr[SPR_HIOR];
cap.cap = KVM_CAP_PPC_PAPR;
ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap);
if (ret) {
- goto fail;
- }
-
- /*
- * XXX We set HIOR here. It really should be a qdev property of
- * the CPU node, but we don't have CPUs converted to qdev yet.
- *
- * Once we have qdev CPUs, move HIOR to a qdev property and
- * remove this chunk.
- */
- reg.id = KVM_REG_PPC_HIOR;
- reg.addr = (uintptr_t)&hior;
- ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, &reg);
- if (ret) {
- fprintf(stderr, "Couldn't set HIOR. Maybe you're running an old \n"
- "kernel with support for HV KVM but no PAPR PR \n"
- "KVM in which case things will work. If they don't \n"
- "please update your host kernel!\n");
- }
-
- /* Set SDR1 so kernel space finds the HTAB */
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
- if (ret) {
- goto fail;
+ cpu_abort(env, "This KVM version does not support PAPR\n");
}
-
- sregs.u.s.sdr1 = env->spr[SPR_SDR1];
-
- ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
- if (ret) {
- goto fail;
- }
-
- return;
-
-fail:
- cpu_abort(env, "This KVM version does not support PAPR\n");
}
int kvmppc_smt_threads(void)
@@ -999,6 +1011,7 @@ int kvmppc_smt_threads(void)
return cap_ppc_smt ? cap_ppc_smt : 1;
}
+#ifdef TARGET_PPC64
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
{
void *rma;
@@ -1042,6 +1055,16 @@ off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
return size;
}
+uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
+{
+ if (cap_ppc_rma >= 2) {
+ return current_size;
+ }
+ return MIN(current_size,
+ getrampagesize() << (hash_shift - 7));
+}
+#endif
+
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
{
struct kvm_create_spapr_tce args = {
@@ -1101,6 +1124,44 @@ int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
return 0;
}
+int kvmppc_reset_htab(int shift_hint)
+{
+ uint32_t shift = shift_hint;
+
+ if (!kvm_enabled()) {
+ /* Full emulation, tell caller to allocate htab itself */
+ return 0;
+ }
+ if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
+ int ret;
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
+ if (ret == -ENOTTY) {
+ /* At least some versions of PR KVM advertise the
+ * capability, but don't implement the ioctl(). Oops.
+ * Return 0 so that we allocate the htab in qemu, as is
+ * correct for PR. */
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ return shift;
+ }
+
+ /* We have a kernel that predates the htab reset calls. For PR
+ * KVM, we need to allocate the htab ourselves, for an HV KVM of
+ * this era, it has allocated a 16MB fixed size hash table
+ * already. Kernels of this era have the GET_PVINFO capability
+ * only on PR, so we use this hack to determine the right
+ * answer */
+ if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ /* PR - tell caller to allocate htab */
+ return 0;
+ } else {
+ /* HV - assume 16MB kernel allocated htab */
+ return 24;
+ }
+}
+
static inline uint32_t mfpvr(void)
{
uint32_t pvr;
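
The return-value convention of the new kvmppc_reset_htab() is easy to misread from the code alone, so here is a minimal, hypothetical caller sketch. It is not part of the patch: only the prototype is taken from the declaration added to kvm_ppc.h below, and pick_htab_shift() is invented purely for illustration.

/* Hypothetical caller, for illustration only. */
int kvmppc_reset_htab(int shift_hint);

static int pick_htab_shift(int shift_hint)
{
    int shift = kvmppc_reset_htab(shift_hint);

    if (shift > 0) {
        /* The kernel has allocated (or kept) a hash table of
         * 1 << shift bytes; QEMU must not allocate its own. */
        return shift;
    }
    if (shift == 0) {
        /* TCG, or PR KVM without a working ioctl: QEMU allocates the
         * hash table itself and keeps the size it originally wanted. */
        return shift_hint;
    }
    /* shift < 0: KVM_PPC_ALLOCATE_HTAB failed with a real error. */
    return shift;
}
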
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index e2f8703853..baad6eb75b 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -27,6 +27,8 @@ int kvmppc_smt_threads(void);
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
+int kvmppc_reset_htab(int shift_hint);
+uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift);
#endif /* !CONFIG_USER_ONLY */
const ppc_def_t *kvmppc_host_cpu_def(void);
int kvmppc_fixup_cpu(CPUPPCState *env);
@@ -94,6 +96,23 @@ static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
{
return -1;
}
+
+static inline int kvmppc_reset_htab(int shift_hint)
+{
+ return -1;
+}
+
+static inline uint64_t kvmppc_rma_size(uint64_t current_size,
+ unsigned int hash_shift)
+{
+ return ram_size;
+}
+
+static inline int kvmppc_update_sdr1(CPUPPCState *env)
+{
+ return 0;
+}
+
#endif /* !CONFIG_USER_ONLY */
static inline const ppc_def_t *kvmppc_host_cpu_def(void)
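
A quick worked example of the clamp in kvmppc_rma_size() above, with made-up inputs: the host page size is assumed to be 4 KiB here (the real code uses getrampagesize()), and the program below only reproduces the arithmetic of the patch, it is not QEMU code.

/* Illustration only: with 4 KiB host pages and a 16 MiB hash table
 * (hash_shift == 24), a 1 GiB RMA request is clamped to
 * 4096 << (24 - 7) == 512 MiB; cap_ppc_rma >= 2 would skip the clamp. */
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t host_page_size = 4096;       /* assumed host page size */
    unsigned int hash_shift = 24;         /* 16 MiB hash table */
    uint64_t requested_rma  = 1ULL << 30; /* 1 GiB */

    uint64_t rma = MIN(requested_rma, host_page_size << (hash_shift - 7));
    printf("RMA limited to %llu MiB\n", (unsigned long long)(rma >> 20));
    return 0;
}
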
diff --git a/target-ppc/machine.c b/target-ppc/machine.c
index d6c2ee41b3..21ce7575e3 100644
--- a/target-ppc/machine.c
+++ b/target-ppc/machine.c
@@ -82,7 +82,7 @@ void cpu_save(QEMUFile *f, void *opaque)
qemu_put_betls(f, &env->hflags);
qemu_put_betls(f, &env->hflags_nmsr);
qemu_put_sbe32s(f, &env->mmu_idx);
- qemu_put_sbe32s(f, &env->power_mode);
+ qemu_put_sbe32(f, 0);
}
int cpu_load(QEMUFile *f, void *opaque, int version_id)
@@ -167,7 +167,7 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_betls(f, &env->hflags);
qemu_get_betls(f, &env->hflags_nmsr);
qemu_get_sbe32s(f, &env->mmu_idx);
- qemu_get_sbe32s(f, &env->power_mode);
+ qemu_get_sbe32(f); /* Discard unused power_mode */
return 0;
}
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index fba2b42427..a972287035 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -10423,6 +10423,14 @@ static void ppc_cpu_reset(CPUState *s)
env->pending_interrupts = 0;
env->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+ env->vpa = 0;
+ env->slb_shadow = 0;
+ env->dispatch_trace_log = 0;
+ env->dtl_size = 0;
+#endif /* TARGET_PPC64 */
+
/* Flush all TLBs */
tlb_flush(env, 1);
}