Diffstat (limited to 'target/ppc')
-rw-r--r--  target/ppc/cpu-qom.h                 |   5
-rw-r--r--  target/ppc/cpu.h                     |  20
-rw-r--r--  target/ppc/fpu_helper.c              | 312
-rw-r--r--  target/ppc/helper.h                  |  11
-rw-r--r--  target/ppc/internal.h                |   3
-rw-r--r--  target/ppc/kvm.c                     |  32
-rw-r--r--  target/ppc/kvm_ppc.h                 |   7
-rw-r--r--  target/ppc/mmu-hash64.c              |  44
-rw-r--r--  target/ppc/mmu_helper.c              |   4
-rw-r--r--  target/ppc/translate.c               | 153
-rw-r--r--  target/ppc/translate/vsx-impl.inc.c  |  11
-rw-r--r--  target/ppc/translate/vsx-ops.inc.c   |  21
-rw-r--r--  target/ppc/translate_init.c          |  27
13 files changed, 615 insertions, 35 deletions
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
index b7977bad18..4e3132b56b 100644
--- a/target/ppc/cpu-qom.h
+++ b/target/ppc/cpu-qom.h
@@ -86,10 +86,13 @@ enum powerpc_mmu_t {
     POWERPC_MMU_2_07 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
                              | POWERPC_MMU_64K
                              | POWERPC_MMU_AMR | 0x00000004,
-    /* FIXME Add POWERPC_MMU_3_OO defines */
     /* Architecture 2.07 "degraded" (no 1T segments) */
     POWERPC_MMU_2_07a = POWERPC_MMU_64 | POWERPC_MMU_AMR
                              | 0x00000004,
+    /* Architecture 3.00 variant */
+    POWERPC_MMU_3_00 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+                             | POWERPC_MMU_64K
+                             | POWERPC_MMU_AMR | 0x00000005,
 };
 
 /*****************************************************************************/
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index bc2a2ce431..425e79d52d 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -381,15 +381,22 @@ struct ppc_slb_t {
 #define LPCR_ISL          (1ull << (63 - 2))
 #define LPCR_KBV          (1ull << (63 - 3))
 #define LPCR_DPFD_SHIFT   (63 - 11)
-#define LPCR_DPFD         (0x3ull << LPCR_DPFD_SHIFT)
+#define LPCR_DPFD         (0x7ull << LPCR_DPFD_SHIFT)
 #define LPCR_VRMASD_SHIFT (63 - 16)
 #define LPCR_VRMASD       (0x1full << LPCR_VRMASD_SHIFT)
+/* P9: Power-saving mode Exit Cause Enable (Upper Section) Mask */
+#define LPCR_PECE_U_SHIFT (63 - 19)
+#define LPCR_PECE_U_MASK  (0x7ull << LPCR_PECE_U_SHIFT)
+#define LPCR_HVEE         (1ull << (63 - 17)) /* Hypervisor Virt Exit Enable */
 #define LPCR_RMLS_SHIFT   (63 - 37)
 #define LPCR_RMLS         (0xfull << LPCR_RMLS_SHIFT)
 #define LPCR_ILE          (1ull << (63 - 38))
 #define LPCR_AIL_SHIFT    (63 - 40)      /* Alternate interrupt location */
 #define LPCR_AIL          (3ull << LPCR_AIL_SHIFT)
+#define LPCR_UPRT         (1ull << (63 - 41)) /* Use Process Table */
+#define LPCR_EVIRT        (1ull << (63 - 42)) /* Enhanced Virtualisation */
 #define LPCR_ONL          (1ull << (63 - 45))
+#define LPCR_LD           (1ull << (63 - 46)) /* Large Decrementer */
 #define LPCR_P7_PECE0     (1ull << (63 - 49))
 #define LPCR_P7_PECE1     (1ull << (63 - 50))
 #define LPCR_P7_PECE2     (1ull << (63 - 51))
@@ -398,11 +405,22 @@ struct ppc_slb_t {
 #define LPCR_P8_PECE2     (1ull << (63 - 49))
 #define LPCR_P8_PECE3     (1ull << (63 - 50))
 #define LPCR_P8_PECE4     (1ull << (63 - 51))
+/* P9: Power-saving mode Exit Cause Enable (Lower Section) Mask */
+#define LPCR_PECE_L_SHIFT (63 - 51)
+#define LPCR_PECE_L_MASK  (0x1full << LPCR_PECE_L_SHIFT)
+#define LPCR_PDEE         (1ull << (63 - 47)) /* Privileged Doorbell Exit EN */
+#define LPCR_HDEE         (1ull << (63 - 48)) /* Hyperv Doorbell Exit Enable */
+#define LPCR_EEE          (1ull << (63 - 49)) /* External Exit Enable */
+#define LPCR_DEE          (1ull << (63 - 50)) /* Decrementer Exit Enable */
+#define LPCR_OEE          (1ull << (63 - 51)) /* Other Exit Enable */
 #define LPCR_MER          (1ull << (63 - 52))
+#define LPCR_GTSE         (1ull << (63 - 53)) /* Guest Translation Shootdown */
 #define LPCR_TC           (1ull << (63 - 54))
+#define LPCR_HEIC         (1ull << (63 - 59)) /* HV Extern Interrupt Control */
 #define LPCR_LPES0        (1ull << (63 - 60))
 #define LPCR_LPES1        (1ull << (63 - 61))
 #define LPCR_RMI          (1ull << (63 - 62))
+#define LPCR_HVICE        (1ull << (63 - 62)) /* HV Virtualisation Int Enable */
 #define LPCR_HDICE        (1ull << (63 - 63))
 
 #define msr_sf ((env->msr >> MSR_SF) & 1)
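
Note: the LPCR_* definitions above all use the "1ull << (63 - n)" idiom, which
translates the ISA's big-endian bit numbering (bit 0 is the most significant
bit of the 64-bit register) into an ordinary left shift. A minimal sketch of
the idiom (the helper name is illustrative, not part of the patch):

    #include <stdint.h>

    /* Illustrative only: build a mask for IBM bit number n of a 64-bit
     * register, exactly as the LPCR_* defines above do. */
    static inline uint64_t ppc_bit(int n)
    {
        return 1ull << (63 - n);
    }

    /* e.g. LPCR_GTSE is IBM bit 53: ppc_bit(53) == 0x400ull */
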
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 9f5cafd5ba..58aee640c3 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -1850,12 +1850,11 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
     getVSR(rD(opcode) + 32, &xt, env);
     helper_reset_fpstatus(env);
 
+    tstat = env->fp_status;
     if (unlikely(Rc(opcode) != 0)) {
-        /* TODO: Support xsadddpo after round-to-odd is implemented */
-        abort();
+        tstat.float_rounding_mode = float_round_to_odd;
     }
 
-    tstat = env->fp_status;
     set_float_exception_flags(0, &tstat);
     xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -1930,19 +1929,18 @@ VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
 void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
 {
     ppc_vsr_t xt, xa, xb;
+    float_status tstat;
 
     getVSR(rA(opcode) + 32, &xa, env);
     getVSR(rB(opcode) + 32, &xb, env);
     getVSR(rD(opcode) + 32, &xt, env);
 
+    helper_reset_fpstatus(env);
+    tstat = env->fp_status;
     if (unlikely(Rc(opcode) != 0)) {
-        /* TODO: Support xsmulpo after round-to-odd is implemented */
-        abort();
+        tstat.float_rounding_mode = float_round_to_odd;
     }
 
-    helper_reset_fpstatus(env);
-
-    float_status tstat = env->fp_status;
     set_float_exception_flags(0, &tstat);
     xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
@@ -2019,18 +2017,18 @@ VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
 void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
 {
     ppc_vsr_t xt, xa, xb;
+    float_status tstat;
 
     getVSR(rA(opcode) + 32, &xa, env);
     getVSR(rB(opcode) + 32, &xb, env);
     getVSR(rD(opcode) + 32, &xt, env);
 
+    helper_reset_fpstatus(env);
+    tstat = env->fp_status;
     if (unlikely(Rc(opcode) != 0)) {
-        /* TODO: Support xsdivqpo after round-to-odd is implemented */
-        abort();
+        tstat.float_rounding_mode = float_round_to_odd;
     }
 
-    helper_reset_fpstatus(env);
-    float_status tstat = env->fp_status;
     set_float_exception_flags(0, &tstat);
     xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
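
Note: the three hunks above share one pattern. A scratch float_status is
copied from env->fp_status, switched to round-to-odd when the instruction's
Rc bit is set, and its accumulated exception flags are OR-ed back afterwards,
so the temporary rounding mode never leaks into the live FP state. Distilled
into a standalone sketch (this wrapper is illustrative only, not code from
the patch; the softfloat calls are the real API):

    /* Illustrative only: the shape shared by xsaddqp/xsmulqp/xsdivqp. */
    static float128 do_qp_op_round_to_odd(CPUPPCState *env, bool ro,
                                          float128 a, float128 b,
                                          float128 (*op)(float128, float128,
                                                         float_status *))
    {
        float_status tstat = env->fp_status;    /* scratch copy */
        float128 r;

        if (ro) {
            tstat.float_rounding_mode = float_round_to_odd;
        }
        set_float_exception_flags(0, &tstat);
        r = op(a, b, &tstat);
        /* merge flags back without touching the rounding mode */
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;
        return r;
    }
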
@@ -2679,6 +2677,99 @@ VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
 VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
 VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
 
+#define VSX_MAX_MINC(name, max)                                              \
+void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
+{                                                                            \
+    ppc_vsr_t xt, xa, xb;                                                    \
+    bool vxsnan_flag = false, vex_flag = false;                              \
+                                                                             \
+    getVSR(rA(opcode) + 32, &xa, env);                                       \
+    getVSR(rB(opcode) + 32, &xb, env);                                       \
+    getVSR(rD(opcode) + 32, &xt, env);                                       \
+                                                                             \
+    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                           \
+                 float64_is_any_nan(xb.VsrD(0)))) {                          \
+        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
+            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
+            vxsnan_flag = true;                                              \
+        }                                                                    \
+        xt.VsrD(0) = xb.VsrD(0);                                             \
+    } else if ((max &&                                                       \
+               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||      \
+               (!max &&                                                      \
+               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {       \
+        xt.VsrD(0) = xa.VsrD(0);                                             \
+    } else {                                                                 \
+        xt.VsrD(0) = xb.VsrD(0);                                             \
+    }                                                                        \
+                                                                             \
+    vex_flag = fpscr_ve & vxsnan_flag;                                       \
+    if (vxsnan_flag) {                                                       \
+        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);               \
+    }                                                                        \
+    if (!vex_flag) {                                                         \
+        putVSR(rD(opcode) + 32, &xt, env);                                   \
+    }                                                                        \
+}                                                                            \
+
+VSX_MAX_MINC(xsmaxcdp, 1);
+VSX_MAX_MINC(xsmincdp, 0);
+
+#define VSX_MAX_MINJ(name, max)                                              \
+void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
+{                                                                            \
+    ppc_vsr_t xt, xa, xb;                                                    \
+    bool vxsnan_flag = false, vex_flag = false;                              \
+                                                                             \
+    getVSR(rA(opcode) + 32, &xa, env);                                       \
+    getVSR(rB(opcode) + 32, &xb, env);                                       \
+    getVSR(rD(opcode) + 32, &xt, env);                                       \
+                                                                             \
+    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                          \
+        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {         \
+            vxsnan_flag = true;                                              \
+        }                                                                    \
+        xt.VsrD(0) = xa.VsrD(0);                                             \
+    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                   \
+        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
+            vxsnan_flag = true;                                              \
+        }                                                                    \
+        xt.VsrD(0) = xb.VsrD(0);                                             \
+    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) { \
+        if (max) {                                                           \
+            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
+                xt.VsrD(0) = 0ULL;                                           \
+            } else {                                                         \
+                xt.VsrD(0) = 0x8000000000000000ULL;                          \
+            }                                                                \
+        } else {                                                             \
+            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {  \
+                xt.VsrD(0) = 0x8000000000000000ULL;                          \
+            } else {                                                         \
+                xt.VsrD(0) = 0ULL;                                           \
+            }                                                                \
+        }                                                                    \
+    } else if ((max &&                                                       \
+               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||      \
+               (!max &&                                                      \
+               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {       \
+        xt.VsrD(0) = xa.VsrD(0);                                             \
+    } else {                                                                 \
+        xt.VsrD(0) = xb.VsrD(0);                                             \
+    }                                                                        \
+                                                                             \
+    vex_flag = fpscr_ve & vxsnan_flag;                                       \
+    if (vxsnan_flag) {                                                       \
+        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);               \
+    }                                                                        \
+    if (!vex_flag) {                                                         \
+        putVSR(rD(opcode) + 32, &xt, env);                                   \
+    }                                                                        \
+}                                                                            \
+
+VSX_MAX_MINJ(xsmaxjdp, 1);
+VSX_MAX_MINJ(xsminjdp, 0);
+
 /* VSX_CMP - VSX floating point compare
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
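
Note: VSX_MAX_MINC implements the "C-like" compare-and-select minimum/maximum.
Unlike the minnum/maxnum-based xsmaxdp/xsmindp, a NaN in either operand simply
yields xb. A rough scalar sketch of the max case (function name illustrative;
flag and trap handling omitted):

    #include <math.h>

    /* Rough scalar equivalent of the xsmaxcdp selection logic above. */
    static double xsmaxcdp_value(double xa, double xb)
    {
        if (isnan(xa) || isnan(xb)) {
            return xb;                  /* any NaN operand: result is xb */
        }
        return !(xa < xb) ? xa : xb;    /* !(xa < xb), matching the
                                           float64_lt() use above */
    }

VSX_MAX_MINJ additionally gives signed zeros a defined ordering: for the max
case +0.0 wins over -0.0, and for the min case -0.0 wins over +0.0.
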
@@ -2861,18 +2952,20 @@ VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
 void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
 {
     ppc_vsr_t xt, xb;
+    float_status tstat;
 
     getVSR(rB(opcode) + 32, &xb, env);
     memset(&xt, 0, sizeof(xt));
 
+    tstat = env->fp_status;
     if (unlikely(Rc(opcode) != 0)) {
-        /* TODO: Support xscvqpdpo after round-to-odd is implemented */
-        abort();
+        tstat.float_rounding_mode = float_round_to_odd;
     }
 
-    xt.VsrD(0) = float128_to_float64(xb.f128, &env->fp_status);
+    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
+    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
     if (unlikely(float128_is_signaling_nan(xb.f128,
-                                           &env->fp_status))) {
+                                           &tstat))) {
         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
         xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
     }
@@ -2993,6 +3086,8 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
 VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                          0xffffffff80000000ULL)
 
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
+
 /* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
  *   op    - instruction mnemonic
@@ -3277,3 +3372,188 @@ void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
     env->fpscr |= cc << FPSCR_FPRF;
     env->crf[BF(opcode)] = cc;
 }
+
+void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
+{
+    ppc_vsr_t xb;
+    ppc_vsr_t xt;
+    uint8_t r = Rrm(opcode);
+    uint8_t ex = Rc(opcode);
+    uint8_t rmc = RMC(opcode);
+    uint8_t rmode = 0;
+    float_status tstat;
+
+    getVSR(rB(opcode) + 32, &xb, env);
+    memset(&xt, 0, sizeof(xt));
+    helper_reset_fpstatus(env);
+
+    if (r == 0 && rmc == 0) {
+        rmode = float_round_ties_away;
+    } else if (r == 0 && rmc == 0x3) {
+        rmode = fpscr_rn;
+    } else if (r == 1) {
+        switch (rmc) {
+        case 0:
+            rmode = float_round_nearest_even;
+            break;
+        case 1:
+            rmode = float_round_to_zero;
+            break;
+        case 2:
+            rmode = float_round_up;
+            break;
+        case 3:
+            rmode = float_round_down;
+            break;
+        default:
+            abort();
+        }
+    }
+
+    tstat = env->fp_status;
+    set_float_exception_flags(0, &tstat);
+    set_float_rounding_mode(rmode, &tstat);
+    xt.f128 = float128_round_to_int(xb.f128, &tstat);
+    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+        if (float128_is_signaling_nan(xb.f128, &tstat)) {
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
+            xt.f128 = float128_snan_to_qnan(xt.f128);
+        }
+    }
+
+    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
+        env->fp_status.float_exception_flags &= ~float_flag_inexact;
+    }
+
+    helper_compute_fprf_float128(env, xt.f128);
+    float_check_status(env);
+    putVSR(rD(opcode) + 32, &xt, env);
+}
+
+void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
+{
+    ppc_vsr_t xb;
+    ppc_vsr_t xt;
+    uint8_t r = Rrm(opcode);
+    uint8_t rmc = RMC(opcode);
+    uint8_t rmode = 0;
+    floatx80 round_res;
+    float_status tstat;
+
+    getVSR(rB(opcode) + 32, &xb, env);
+    memset(&xt, 0, sizeof(xt));
+    helper_reset_fpstatus(env);
+
+    if (r == 0 && rmc == 0) {
+        rmode = float_round_ties_away;
+    } else if (r == 0 && rmc == 0x3) {
+        rmode = fpscr_rn;
+    } else if (r == 1) {
+        switch (rmc) {
+        case 0:
+            rmode = float_round_nearest_even;
+            break;
+        case 1:
+            rmode = float_round_to_zero;
+            break;
+        case 2:
+            rmode = float_round_up;
+            break;
+        case 3:
+            rmode = float_round_down;
+            break;
+        default:
+            abort();
+        }
+    }
+
+    tstat = env->fp_status;
+    set_float_exception_flags(0, &tstat);
+    set_float_rounding_mode(rmode, &tstat);
+    round_res = float128_to_floatx80(xb.f128, &tstat);
+    xt.f128 = floatx80_to_float128(round_res, &tstat);
+    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+        if (float128_is_signaling_nan(xb.f128, &tstat)) {
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
+            xt.f128 = float128_snan_to_qnan(xt.f128);
+        }
+    }
+
+    helper_compute_fprf_float128(env, xt.f128);
+    putVSR(rD(opcode) + 32, &xt, env);
+    float_check_status(env);
+}
+
+void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
+{
+    ppc_vsr_t xb;
+    ppc_vsr_t xt;
+    float_status tstat;
+
+    getVSR(rB(opcode) + 32, &xb, env);
+    memset(&xt, 0, sizeof(xt));
+    helper_reset_fpstatus(env);
+
+    tstat = env->fp_status;
+    if (unlikely(Rc(opcode) != 0)) {
+        tstat.float_rounding_mode = float_round_to_odd;
+    }
+
+    set_float_exception_flags(0, &tstat);
+    xt.f128 = float128_sqrt(xb.f128, &tstat);
+    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+        if (float128_is_signaling_nan(xb.f128, &tstat)) {
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+            xt.f128 = float128_snan_to_qnan(xb.f128);
+        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
+            xt.f128 = xb.f128;
+        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+            set_snan_bit_is_one(0, &env->fp_status);
+            xt.f128 = float128_default_nan(&env->fp_status);
+        }
+    }
+
+    helper_compute_fprf_float128(env, xt.f128);
+    putVSR(rD(opcode) + 32, &xt, env);
+    float_check_status(env);
+}
+
+void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
+{
+    ppc_vsr_t xt, xa, xb;
+    float_status tstat;
+
+    getVSR(rA(opcode) + 32, &xa, env);
+    getVSR(rB(opcode) + 32, &xb, env);
+    getVSR(rD(opcode) + 32, &xt, env);
+    helper_reset_fpstatus(env);
+
+    tstat = env->fp_status;
+    if (unlikely(Rc(opcode) != 0)) {
+        tstat.float_rounding_mode = float_round_to_odd;
+    }
+
+    set_float_exception_flags(0, &tstat);
+    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
+    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
+                   float128_is_signaling_nan(xb.f128, &tstat)) {
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+        }
+    }
+
+    helper_compute_fprf_float128(env, xt.f128);
+    putVSR(rD(opcode) + 32, &xt, env);
+    float_check_status(env);
+}
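
Note: for reference, the R/RMC decode implemented by helper_xsrqpi and
helper_xsrqpxp above selects the rounding mode as follows:

    R  RMC   rounding mode
    0  0b00  nearest, ties away from zero  (float_round_ties_away)
    0  0b11  current FPSCR[RN] mode        (fpscr_rn)
    1  0b00  nearest, ties to even         (float_round_nearest_even)
    1  0b01  toward zero                   (float_round_to_zero)
    1  0b10  toward +infinity              (float_round_up)
    1  0b11  toward -infinity              (float_round_down)

The remaining R=0 encodings fall through with rmode left at its zero
initialiser.
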
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 85af9df36d..6d77661f7c 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -431,6 +431,10 @@ DEF_HELPER_2(xscmpoqp, void, env, i32)
 DEF_HELPER_2(xscmpuqp, void, env, i32)
 DEF_HELPER_2(xsmaxdp, void, env, i32)
 DEF_HELPER_2(xsmindp, void, env, i32)
+DEF_HELPER_2(xsmaxcdp, void, env, i32)
+DEF_HELPER_2(xsmincdp, void, env, i32)
+DEF_HELPER_2(xsmaxjdp, void, env, i32)
+DEF_HELPER_2(xsminjdp, void, env, i32)
 DEF_HELPER_2(xscvdphp, void, env, i32)
 DEF_HELPER_2(xscvdpqp, void, env, i32)
 DEF_HELPER_2(xscvdpsp, void, env, i32)
@@ -438,6 +442,8 @@ DEF_HELPER_2(xscvdpspn, i64, env, i64)
 DEF_HELPER_2(xscvqpdp, void, env, i32)
 DEF_HELPER_2(xscvqpsdz, void, env, i32)
 DEF_HELPER_2(xscvqpswz, void, env, i32)
+DEF_HELPER_2(xscvqpudz, void, env, i32)
+DEF_HELPER_2(xscvqpuwz, void, env, i32)
 DEF_HELPER_2(xscvhpdp, void, env, i32)
 DEF_HELPER_2(xscvsdqp, void, env, i32)
 DEF_HELPER_2(xscvspdp, void, env, i32)
@@ -459,6 +465,10 @@ DEF_HELPER_2(xsrdpic, void, env, i32)
 DEF_HELPER_2(xsrdpim, void, env, i32)
 DEF_HELPER_2(xsrdpip, void, env, i32)
 DEF_HELPER_2(xsrdpiz, void, env, i32)
+DEF_HELPER_2(xsrqpi, void, env, i32)
+DEF_HELPER_2(xsrqpxp, void, env, i32)
+DEF_HELPER_2(xssqrtqp, void, env, i32)
+DEF_HELPER_2(xssubqp, void, env, i32)
 
 DEF_HELPER_2(xsaddsp, void, env, i32)
 DEF_HELPER_2(xssubsp, void, env, i32)
@@ -661,6 +671,7 @@ DEF_HELPER_2(load_slb_vsid, tl, env, tl)
 DEF_HELPER_2(find_slb_vsid, tl, env, tl)
 DEF_HELPER_FLAGS_1(slbia, TCG_CALL_NO_RWG, void, env)
 DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl)
 #endif
 DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
 DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 5a2fd68427..1f441c6483 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -133,6 +133,8 @@ EXTRACT_HELPER(UIMM4, 16, 4);
 EXTRACT_HELPER(NB, 11, 5);
 /* Shift count */
 EXTRACT_HELPER(SH, 11, 5);
+/* lwat/stwat/ldat/stdat */
+EXTRACT_HELPER(FC, 11, 5);
 /* Vector shift count */
 EXTRACT_HELPER(VSH, 6, 4);
 /* Mask start */
@@ -186,6 +188,7 @@ EXTRACT_HELPER(DCM, 10, 6)
 
 /* DFP Z23-form */
 EXTRACT_HELPER(RMC, 9, 2)
+EXTRACT_HELPER(Rrm, 16, 1)
 
 EXTRACT_HELPER_SPLIT(DQxT, 3, 1, 21, 5);
 EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
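
Note: EXTRACT_HELPER(FC, 11, 5) and EXTRACT_HELPER(Rrm, 16, 1) generate
accessors that pull a bit field out of the 32-bit instruction word. The
expansion is equivalent to (sketch; the real macro also handles types):

    /* Equivalent of the generated accessors: extract <len> bits of the
     * opcode starting at bit <shift> from the least significant end. */
    static inline uint32_t FC(uint32_t opcode)  { return (opcode >> 11) & 0x1f; }
    static inline uint32_t Rrm(uint32_t opcode) { return (opcode >> 16) & 0x1; }
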
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 663d2e79c9..52bbea514a 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -438,12 +438,13 @@ static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
     return (1ul << shift) <= rampgsize;
 }
 
+static long max_cpu_page_size;
+
 static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
 {
     static struct kvm_ppc_smmu_info smmu_info;
     static bool has_smmu_info;
     CPUPPCState *env = &cpu->env;
-    long rampagesize;
     int iq, ik, jq, jk;
     bool has_64k_pages = false;
 
@@ -458,7 +459,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
         has_smmu_info = true;
     }
 
-    rampagesize = getrampagesize();
+    if (!max_cpu_page_size) {
+        max_cpu_page_size = getrampagesize();
+    }
 
     /* Convert to QEMU form */
     memset(&env->sps, 0, sizeof(env->sps));
@@ -478,14 +481,14 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
 
-        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+        if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                  ksps->page_shift)) {
             continue;
         }
         qsps->page_shift = ksps->page_shift;
         qsps->slb_enc = ksps->slb_enc;
         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
-            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+            if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                      ksps->enc[jk].page_shift)) {
                 continue;
             }
@@ -510,12 +513,33 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
         env->mmu_model &= ~POWERPC_MMU_64K;
     }
 }
+
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+    Object *mem_obj = object_resolve_path(obj_path, NULL);
+    char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
+    long pagesize;
+
+    if (mempath) {
+        pagesize = gethugepagesize(mempath);
+    } else {
+        pagesize = getpagesize();
+    }
+
+    return pagesize >= max_cpu_page_size;
+}
+
 #else /* defined (TARGET_PPC64) */
 
 static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
 {
 }
 
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+    return true;
+}
+
 #endif /* !defined (TARGET_PPC64) */
 
 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 151c00bac7..8da2ee418a 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -60,6 +60,8 @@ int kvmppc_enable_hwrng(void);
 int kvmppc_put_books_sregs(PowerPCCPU *cpu);
 PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
 
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path);
+
 #else
 
 static inline uint32_t kvmppc_get_tbfreq(void)
@@ -192,6 +194,11 @@ static inline uint64_t kvmppc_rma_size(uint64_t current_size,
     return ram_size;
 }
 
+static inline bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+    return true;
+}
+
 #endif /* !CONFIG_USER_ONLY */
 
 static inline bool kvmppc_has_cap_epr(void)
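
Note: kvmppc_is_mem_backend_page_size_ok() compares a memory backend's page
size against the largest page size the vCPUs were configured with, which
matters when hotplugging a backend after kvm_fixup_page_sizes() has run. A
caller might validate a backend like this (the surrounding code is a
hypothetical sketch; only the function declared above is real):

    /* Hypothetical caller: reject a backend whose page size is smaller
     * than the maximum page size the guest CPUs can map. */
    static void check_mem_backend(const char *obj_path, Error **errp)
    {
        if (!kvmppc_is_mem_backend_page_size_ok((char *)obj_path)) {
            error_setg(errp, "memory backend %s has a page size smaller "
                       "than the CPU's maximum page size", obj_path);
        }
    }
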
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index bb78fb5497..76669ed82c 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -115,7 +115,8 @@ void helper_slbia(CPUPPCState *env)
     }
 }
 
-void helper_slbie(CPUPPCState *env, target_ulong addr)
+static void __helper_slbie(CPUPPCState *env, target_ulong addr,
+                           target_ulong global)
 {
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
     ppc_slb_t *slb;
@@ -132,10 +133,21 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
          * and we still don't have a tlb_flush_mask(env, n, mask)
          * in QEMU, we just invalidate all TLBs
          */
-        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+        env->tlb_need_flush |=
+            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
     }
 }
 
+void helper_slbie(CPUPPCState *env, target_ulong addr)
+{
+    __helper_slbie(env, addr, false);
+}
+
+void helper_slbieg(CPUPPCState *env, target_ulong addr)
+{
+    __helper_slbie(env, addr, true);
+}
+
 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                   target_ulong esid, target_ulong vsid)
 {
@@ -640,7 +652,15 @@ static void ppc_hash64_set_isi(CPUState *cs, CPUPPCState *env,
     if (msr_ir) {
         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
     } else {
-        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+        switch (env->mmu_model) {
+        case POWERPC_MMU_3_00:
+            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
+            vpm = true;
+            break;
+        default:
+            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+            break;
+        }
     }
     if (vpm && !msr_hv) {
         cs->exception_index = POWERPC_EXCP_HISI;
@@ -658,7 +678,15 @@ static void ppc_hash64_set_dsi(CPUState *cs, CPUPPCState *env, uint64_t dar,
     if (msr_dr) {
         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
     } else {
-        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+        switch (env->mmu_model) {
+        case POWERPC_MMU_3_00:
+            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
+            vpm = true;
+            break;
+        default:
+            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+            break;
+        }
     }
     if (vpm && !msr_hv) {
         cs->exception_index = POWERPC_EXCP_HDSI;
@@ -1050,6 +1078,14 @@ void helper_store_lpcr(CPUPPCState *env, target_ulong val)
                       LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
                       LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
         break;
+    case POWERPC_MMU_3_00: /* P9 */
+        lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
+                      (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
+                      LPCR_UPRT | LPCR_EVIRT | LPCR_ONL |
+                      (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
+                      LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
+                      LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
+        break;
     default:
         ;
     }
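
Note: helper_store_lpcr() acts as a per-MMU-model write mask: each model
whitelists the LPCR bits a store may change, and everything else is silently
dropped. A reduced sketch of the idea (function name hypothetical, mask
abbreviated):

    /* Hypothetical reduction of the P9 branch above: only whitelisted
     * bits of the incoming value reach the architected register. */
    static uint64_t lpcr_filter_p9(uint64_t val)
    {
        const uint64_t writable = LPCR_VPM1 | LPCR_DPFD | LPCR_AIL |
                                  LPCR_UPRT | LPCR_GTSE | LPCR_PDEE |
                                  LPCR_HDEE | LPCR_EEE | LPCR_DEE |
                                  LPCR_OEE;   /* ...plus the rest */
        return val & writable;
    }
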
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index f746f53615..eb2d482ef7 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -825,7 +825,7 @@ static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
         tlb = &env->tlb.tlbe[i];
         ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
                                  access_type, i);
-        if (!ret) {
+        if (ret != -1) {
             break;
         }
     }
@@ -1935,6 +1935,7 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
     case POWERPC_MMU_2_06a:
     case POWERPC_MMU_2_07:
     case POWERPC_MMU_2_07a:
+    case POWERPC_MMU_3_00:
 #endif /* defined(TARGET_PPC64) */
         env->tlb_need_flush = 0;
         tlb_flush(CPU(cpu));
@@ -1974,6 +1975,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
     case POWERPC_MMU_2_06a:
     case POWERPC_MMU_2_07:
    case POWERPC_MMU_2_07a:
+    case POWERPC_MMU_3_00:
         /* tlbie invalidate TLBs for all segments */
         /* XXX: given the fact that there are too many segments to invalidate,
          *      and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index b48abaedfb..3ba2616b8a 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -2976,6 +2976,113 @@ LARX(lbarx, DEF_MEMOP(MO_UB))
 LARX(lharx, DEF_MEMOP(MO_UW))
 LARX(lwarx, DEF_MEMOP(MO_UL))
 
+#define LD_ATOMIC(name, memop, tp, op, eop)                             \
+static void gen_##name(DisasContext *ctx)                               \
+{                                                                       \
+    int len = MEMOP_GET_SIZE(memop);                                    \
+    uint32_t gpr_FC = FC(ctx->opcode);                                  \
+    TCGv EA = tcg_temp_local_new();                                     \
+    TCGv_##tp t0, t1;                                                   \
+                                                                        \
+    gen_addr_register(ctx, EA);                                         \
+    if (len > 1) {                                                      \
+        gen_check_align(ctx, EA, len - 1);                              \
+    }                                                                   \
+    t0 = tcg_temp_new_##tp();                                           \
+    t1 = tcg_temp_new_##tp();                                           \
+    tcg_gen_##op(t0, cpu_gpr[rD(ctx->opcode) + 1]);                     \
+                                                                        \
+    switch (gpr_FC) {                                                   \
+    case 0: /* Fetch and add */                                         \
+        tcg_gen_atomic_fetch_add_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+        break;                                                          \
+    case 1: /* Fetch and xor */                                         \
+        tcg_gen_atomic_fetch_xor_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+        break;                                                          \
+    case 2: /* Fetch and or */                                          \
+        tcg_gen_atomic_fetch_or_##tp(t1, EA, t0, ctx->mem_idx, memop);  \
+        break;                                                          \
+    case 3: /* Fetch and 'and' */                                       \
+        tcg_gen_atomic_fetch_and_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+        break;                                                          \
+    case 8: /* Swap */                                                  \
+        tcg_gen_atomic_xchg_##tp(t1, EA, t0, ctx->mem_idx, memop);      \
+        break;                                                          \
+    case 4:  /* Fetch and max unsigned */                               \
+    case 5:  /* Fetch and max signed */                                 \
+    case 6:  /* Fetch and min unsigned */                               \
+    case 7:  /* Fetch and min signed */                                 \
+    case 16: /* compare and swap not equal */                           \
+    case 24: /* Fetch and increment bounded */                          \
+    case 25: /* Fetch and increment equal */                            \
+    case 28: /* Fetch and decrement bounded */                          \
+        gen_invalid(ctx);                                               \
+        break;                                                          \
+    default:                                                            \
+        /* invoke data storage error handler */                         \
+        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);   \
+    }                                                                   \
+    tcg_gen_##eop(cpu_gpr[rD(ctx->opcode)], t1);                        \
+    tcg_temp_free_##tp(t0);                                             \
+    tcg_temp_free_##tp(t1);                                             \
+    tcg_temp_free(EA);                                                  \
+}
+
+LD_ATOMIC(lwat, DEF_MEMOP(MO_UL), i32, trunc_tl_i32, extu_i32_tl)
+#if defined(TARGET_PPC64)
+LD_ATOMIC(ldat, DEF_MEMOP(MO_Q), i64, mov_i64, mov_i64)
+#endif
+
+#define ST_ATOMIC(name, memop, tp, op)                                  \
+static void gen_##name(DisasContext *ctx)                               \
+{                                                                       \
+    int len = MEMOP_GET_SIZE(memop);                                    \
+    uint32_t gpr_FC = FC(ctx->opcode);                                  \
+    TCGv EA = tcg_temp_local_new();                                     \
+    TCGv_##tp t0, t1;                                                   \
+                                                                        \
+    gen_addr_register(ctx, EA);                                         \
+    if (len > 1) {                                                      \
+        gen_check_align(ctx, EA, len - 1);                              \
+    }                                                                   \
+    t0 = tcg_temp_new_##tp();                                           \
+    t1 = tcg_temp_new_##tp();                                           \
+    tcg_gen_##op(t0, cpu_gpr[rD(ctx->opcode) + 1]);                     \
+                                                                        \
+    switch (gpr_FC) {                                                   \
+    case 0: /* add and Store */                                         \
+        tcg_gen_atomic_add_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+        break;                                                          \
+    case 1: /* xor and Store */                                         \
+        tcg_gen_atomic_xor_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+        break;                                                          \
+    case 2: /* Or and Store */                                          \
+        tcg_gen_atomic_or_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop);  \
+        break;                                                          \
+    case 3: /* 'and' and Store */                                       \
+        tcg_gen_atomic_and_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
+        break;                                                          \
+    case 4:  /* Store max unsigned */                                   \
+    case 5:  /* Store max signed */                                     \
+    case 6:  /* Store min unsigned */                                   \
+    case 7:  /* Store min signed */                                     \
+    case 24: /* Store twin */                                           \
+        gen_invalid(ctx);                                               \
+        break;                                                          \
+    default:                                                            \
+        /* invoke data storage error handler */                         \
+        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);   \
+    }                                                                   \
+    tcg_temp_free_##tp(t0);                                             \
+    tcg_temp_free_##tp(t1);                                             \
+    tcg_temp_free(EA);                                                  \
+}
+
+ST_ATOMIC(stwat, DEF_MEMOP(MO_UL), i32, trunc_tl_i32)
+#if defined(TARGET_PPC64)
+ST_ATOMIC(stdat, DEF_MEMOP(MO_Q), i64, mov_i64)
+#endif
+
 #if defined(CONFIG_USER_ONLY)
 static void gen_conditional_store(DisasContext *ctx, TCGv EA,
                                   int reg, int memop)
@@ -4377,6 +4484,30 @@ static void gen_slbie(DisasContext *ctx)
     gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
 #endif /* defined(CONFIG_USER_ONLY) */
 }
+
+/* slbieg */
+static void gen_slbieg(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+    GEN_PRIV;
+#else
+    CHK_SV;
+
+    gen_helper_slbieg(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbsync */
+static void gen_slbsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+    GEN_PRIV;
+#else
+    CHK_SV;
+    gen_check_tlb_flush(ctx, true);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
 #endif /* defined(TARGET_PPC64) */
 
 /*** External control ***/
@@ -6025,6 +6156,19 @@ static inline void gen_cp_abort(DisasContext *ctx)
     // Do Nothing
 }
 
+#define GEN_CP_PASTE_NOOP(name)                           \
+static inline void gen_##name(DisasContext *ctx)          \
+{                                                         \
+    /* Generate invalid exception until                   \
+     * we have an implementation of the copy              \
+     * paste facility                                     \
+     */                                                   \
+    gen_invalid(ctx);                                     \
+}
+
+GEN_CP_PASTE_NOOP(copy)
+GEN_CP_PASTE_NOOP(paste)
+
 static void gen_tcheck(DisasContext *ctx)
 {
     if (unlikely(!ctx->tm_enabled)) {
@@ -6174,7 +6318,9 @@ GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
 GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
 GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
@@ -6230,10 +6376,14 @@ GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
 GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
+GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
 #if defined(TARGET_PPC64)
+GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
@@ -6241,6 +6391,7 @@ GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
 #endif
 GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT),
+GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039FF801, PPC_NONE, PPC2_ISA300),
 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
@@ -6313,6 +6464,8 @@ GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
 #if defined(TARGET_PPC64)
 GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI),
 GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI),
+GEN_HANDLER_E(slbieg, 0x1F, 0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
 #endif
 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
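
Note: at the guest-semantics level, the lwat/stwat instructions translated by
LD_ATOMIC/ST_ATOMIC above perform an atomic read-modify-write on the word at
EA, with the operand taken from GPR RT+1 and the function code FC selecting
the operation. A C11 sketch of what FC=0 lwat computes (purely illustrative,
not QEMU code):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative guest semantics of lwat with FC=0: atomically fetch
     * the word at EA into rt while adding the value from rt+1 to memory. */
    static uint32_t lwat_fc0(_Atomic uint32_t *ea, uint32_t rt_plus_1)
    {
        return atomic_fetch_add(ea, rt_plus_1);
    }

The store forms differ only in returning the post-operation value internally
(add_fetch rather than fetch_add) and in not writing a result register.
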
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index a44c0034a8..7f12908029 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -808,6 +808,10 @@ GEN_VSX_HELPER_2(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
@@ -815,6 +819,8 @@ GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
 GEN_VSX_HELPER_2(xscvqpdp, 0x04, 0x1A, 0x14, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
@@ -833,6 +839,11 @@ GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
 GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
 
+GEN_VSX_HELPER_2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
+GEN_VSX_HELPER_2(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
+
 GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
 GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
 GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
diff --git a/target/ppc/translate/vsx-ops.inc.c b/target/ppc/translate/vsx-ops.inc.c
index 7dc9f6f477..5030c4aceb 100644
--- a/target/ppc/translate/vsx-ops.inc.c
+++ b/target/ppc/translate/vsx-ops.inc.c
@@ -103,6 +103,21 @@ GEN_HANDLER_E(name, 0x3F, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
 #define GEN_VSX_XFORM_300_EO(name, opc2, opc3, opc4, inval) \
 GEN_HANDLER_E_2(name, 0x3F, opc2, opc3, opc4, inval, PPC_NONE, PPC2_ISA300)
 
+#define GEN_VSX_Z23FORM_300(name, opc2, opc3, opc4, inval)        \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x1, inval)
+
+GEN_VSX_Z23FORM_300(xsrqpi, 0x05, 0x0, 0x0, 0x0),
+GEN_VSX_Z23FORM_300(xsrqpxp, 0x05, 0x1, 0x0, 0x0),
+GEN_VSX_XFORM_300_EO(xssqrtqp, 0x04, 0x19, 0x1B, 0x0),
+GEN_VSX_XFORM_300(xssubqp, 0x04, 0x10, 0x0),
+
 GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX),
 GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
 GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
@@ -116,6 +131,8 @@ GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001),
 GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0),
 GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001),
 GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpuwz, 0x04, 0x1A, 0x01, 0x00000001),
 
 #ifdef TARGET_PPC64
 GEN_XX2FORM_EO(xsxexpdp, 0x16, 0x15, 0x00, PPC2_ISA300),
@@ -185,6 +202,10 @@ GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001),
 GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001),
 GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
 GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
+GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300),
+GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300),
+GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300),
+GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300),
 GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300),
 GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
 GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
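
Note: GEN_VSX_Z23FORM_300 above registers eight opcode-table entries per
instruction because the table is indexed by fixed opc3/opc4 values, while in
the Z23 form the RMC field and the EX bit alias into that decode space; the
macro simply enumerates every combination. The fields themselves come out of
the instruction word through the EXTRACT_HELPERs added in internal.h; a
sketch of the decode (struct name hypothetical, shift/mask pairs mirror the
helpers):

    /* Hypothetical container for the Z23-form fields used by xsrqpi. */
    struct z23_fields {
        unsigned rmc; /* RMC(opcode) = (opcode >> 9)  & 0x3 */
        unsigned r;   /* Rrm(opcode) = (opcode >> 16) & 0x1 */
        unsigned ex;  /* Rc(opcode)  =  opcode        & 0x1 */
    };
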
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index 76f79fa77b..be35cbd3a2 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -8816,8 +8816,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
                        (1ull << MSR_PMM) |
                        (1ull << MSR_RI) |
                        (1ull << MSR_LE);
-    /* Using 2.07 defines until new radix model is added. */
-    pcc->mmu_model = POWERPC_MMU_2_07;
+    pcc->mmu_model = POWERPC_MMU_3_00;
 #if defined(CONFIG_SOFTMMU)
     pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
     /* segment page size remain the same */
@@ -8871,12 +8870,24 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
     lpcr->default_value &= ~LPCR_RMLS;
     lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
 
-    /* P7 and P8 has slightly different PECE bits, mostly because P8 adds
-     * bit 47 and 48 which are reserved on P7. Here we set them all, which
-     * will work as expected for both implementations
-     */
-    lpcr->default_value |= LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
-                           LPCR_P8_PECE3 | LPCR_P8_PECE4;
+    switch (env->mmu_model) {
+    case POWERPC_MMU_3_00:
+        /* By default we choose legacy mode and switch to new hash or radix
+         * when a register process table hcall is made. So disable process
+         * tables and guest translation shootdown by default
+         */
+        lpcr->default_value &= ~(LPCR_UPRT | LPCR_GTSE);
+        lpcr->default_value |= LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE |
+                               LPCR_OEE;
+        break;
+    default:
+        /* P7 and P8 has slightly different PECE bits, mostly because P8 adds
+         * bit 47 and 48 which are reserved on P7. Here we set them all, which
+         * will work as expected for both implementations
+         */
+        lpcr->default_value |= LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
+                               LPCR_P8_PECE3 | LPCR_P8_PECE4;
+    }
 
     /* We should be followed by a CPU reset but update the active value
      * just in case...