author     Peter Maydell <peter.maydell@linaro.org>  2018-07-03 14:59:27 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2018-07-03 14:59:27 +0100
commit     b07cd3e748b3f27a17c27afeee578dc4eedb8dd5
tree       c7a6d5ea8459801bcc120b30958397824b8453f3 /target/ppc
parent     a395717cbd26e7593d3c3fe81faca121ec6d13e8
parent     29f9cef39eb1ae55e82c6763eb22d7a1bdff7276
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-3.0-20180703' into staging
ppc patch queue 2018-07-03

Here's a last minute pull request before today's soft freeze. Ideally I
would have sent this earlier, but I was waiting for a couple of extra
fixes I knew were close. And the freeze crept up on me, like always.

Most of the changes here are bugfixes in any case. There are some
cleanups as well, which have been in my staging tree for a little
while. There are a couple of truly new features (some extensions to the
sam460ex platform), but these are low risk, since they only affect a
new and not really stabilized machine type anyway.

Highlights are:
  * Mac platform improvements from Mark Cave-Ayland
  * Sam460ex improvements from BALATON Zoltan et al.
  * XICS interrupt handler cleanups from Cédric Le Goater
  * TCG improvements for atomic loads and stores from Richard Henderson
  * Assorted other bugfixes

# gpg: Signature made Tue 03 Jul 2018 06:55:22 BST
# gpg:                using RSA key 6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>"
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-3.0-20180703: (35 commits)
  ppc: Include vga cirrus card into the compiling process
  target/ppc: Relax reserved bitmask of indexed store instructions
  target/ppc: set is_jmp on ppc_tr_breakpoint_check
  spapr: compute default value of "hpt-max-page-size" later
  target/ppc/kvm: don't pass cpu to kvm_get_smmu_info()
  target/ppc/kvm: get rid of kvm_get_fallback_smmu_info()
  ppc440_uc: Basic emulation of PPC440 DMA controller
  sam460ex: Add RTC device
  hw/timer: Add basic M41T80 emulation
  ppc4xx_i2c: Rewrite to model hardware more closely
  hw/ppc: Give sam46ex its own config option
  fpu_helper.c: fix setting FPSCR[FI] bit
  target/ppc: Implement the rest of gen_st_atomic
  target/ppc: Implement the rest of gen_ld_atomic
  target/ppc: Use atomic min/max helpers
  target/ppc: Use MO_ALIGN for EXIWX and ECOWX
  target/ppc: Split out gen_st_atomic
  target/ppc: Split out gen_ld_atomic
  target/ppc: Split out gen_load_locked
  target/ppc: Tidy gen_conditional_store
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#   hw/ppc/spapr.c
Diffstat (limited to 'target/ppc')
-rw-r--r--  target/ppc/cpu.h                    8
-rw-r--r--  target/ppc/excp_helper.c           18
-rw-r--r--  target/ppc/fpu_helper.c             8
-rw-r--r--  target/ppc/helper.h                11
-rw-r--r--  target/ppc/internal.h               5
-rw-r--r--  target/ppc/kvm.c                  118
-rw-r--r--  target/ppc/mem_helper.c            72
-rw-r--r--  target/ppc/translate.c            641
-rw-r--r--  target/ppc/translate_init.inc.c     1
9 files changed, 541 insertions(+), 341 deletions(-)
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index c7f3fb6b73..4edcf62cf7 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -196,7 +196,6 @@ enum {
/* QEMU exceptions: special cases we want to stop translation */
POWERPC_EXCP_SYNC = 0x202, /* context synchronizing instruction */
POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
- POWERPC_EXCP_STCX = 0x204 /* Conditional stores in user mode */
};
/* Exceptions error codes */
@@ -994,10 +993,6 @@ struct CPUPPCState {
/* Reservation value */
target_ulong reserve_val;
target_ulong reserve_val2;
- /* Reservation store address */
- target_ulong reserve_ea;
- /* Reserved store source register and size */
- target_ulong reserve_info;
/* Those ones are used in supervisor mode only */
/* machine state register */
@@ -1015,6 +1010,9 @@ struct CPUPPCState {
/* Next instruction pointer */
target_ulong nip;
+ /* High part of 128-bit helper return. */
+ uint64_t retxh;
+
int access_type; /* when a memory exception occurs, the access
type is stored here */
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index c092fbead0..d6e97a90e0 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -22,7 +22,7 @@
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
-
+#include "internal.h"
#include "helper_regs.h"
//#define DEBUG_OP
@@ -1198,3 +1198,19 @@ void helper_book3s_msgsnd(target_ulong rb)
qemu_mutex_unlock_iothread();
}
#endif
+
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CPUPPCState *env = cs->env_ptr;
+ uint32_t insn;
+
+ /* Restore state and reload the insn we executed, for filling in DSISR. */
+ cpu_restore_state(cs, retaddr, true);
+ insn = cpu_ldl_code(env, env->nip);
+
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = insn & 0x03FF0000;
+ cpu_loop_exit(cs);
+}
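
The new hook reloads the faulting instruction and keeps only the bits the
ALIGN exception path wants reflected in DSISR. As a quick worked example of
the 0x03FF0000 mask (the instruction word below is an assumed illustration,
not taken from the patch): for a D-form load/store, the mask retains the
RT/RS field (bits 6-10) and the RA field (bits 11-15).

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t insn = 0x80A40000;            /* lwz r5, 0(r4) */
        uint32_t err  = insn & 0x03FF0000;     /* keep RT (bits 6-10) and RA (bits 11-15) */

        printf("error_code = 0x%08x\n", err);  /* prints 0x00a40000 */
        return 0;
    }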
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 7714bfe0f9..8675d931b6 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -274,6 +274,7 @@ static inline void float_inexact_excp(CPUPPCState *env)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
+ env->fpscr |= 1 << FPSCR_FI;
env->fpscr |= 1 << FPSCR_XX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
@@ -533,6 +534,7 @@ static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
int status = get_float_exception_flags(&env->fp_status);
+ bool inexact_happened = false;
if (status & float_flag_divbyzero) {
float_zero_divide_excp(env, raddr);
@@ -542,6 +544,12 @@ static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
float_underflow_excp(env);
} else if (status & float_flag_inexact) {
float_inexact_excp(env);
+ inexact_happened = true;
+ }
+
+ /* if the inexact flag was not set */
+ if (inexact_happened == false) {
+ env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
}
if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index d751f0e219..5706c2497f 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -799,3 +799,14 @@ DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
DEF_HELPER_1(tbegin, void, env)
DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
+
+#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
+DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
+DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
+DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
+ void, env, tl, i64, i64, i32)
+DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG,
+ void, env, tl, i64, i64, i32)
+DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32)
+DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32)
+#endif
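
For reference, each of these DEF_HELPER_FLAGS declarations expands to an
ordinary C prototype, with i64 mapping to uint64_t, env to CPUPPCState *,
tl to target_ulong and i32 to uint32_t; the definitions in mem_helper.c
further down match accordingly, e.g.:

    uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                                   uint32_t opidx);
    void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                                uint64_t lo, uint64_t hi, uint32_t opidx);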
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 1f441c6483..a9bcadff42 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -252,4 +252,9 @@ static inline void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
#endif /* PPC_INTERNAL_H */
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 4df4ff6cbf..9211ee2ee1 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -248,107 +248,25 @@ static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
#if defined(TARGET_PPC64)
-static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
- struct kvm_ppc_smmu_info *info)
+static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
{
- CPUPPCState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
-
- memset(info, 0, sizeof(*info));
-
- /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
- * need to "guess" what the supported page sizes are.
- *
- * For that to work we make a few assumptions:
- *
- * - Check whether we are running "PR" KVM which only supports 4K
- * and 16M pages, but supports them regardless of the backing
- * store characteritics. We also don't support 1T segments.
- *
- * This is safe as if HV KVM ever supports that capability or PR
- * KVM grows supports for more page/segment sizes, those versions
- * will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
- * will not hit this fallback
- *
- * - Else we are running HV KVM. This means we only support page
- * sizes that fit in the backing store. Additionally we only
- * advertize 64K pages if the processor is ARCH 2.06 and we assume
- * P7 encodings for the SLB and hash table. Here too, we assume
- * support for any newer processor will mean a kernel that
- * implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
- * this fallback.
- */
- if (kvmppc_is_pr(cs->kvm_state)) {
- /* No flags */
- info->flags = 0;
- info->slb_size = 64;
-
- /* Standard 4k base page size segment */
- info->sps[0].page_shift = 12;
- info->sps[0].slb_enc = 0;
- info->sps[0].enc[0].page_shift = 12;
- info->sps[0].enc[0].pte_enc = 0;
-
- /* Standard 16M large page size segment */
- info->sps[1].page_shift = 24;
- info->sps[1].slb_enc = SLB_VSID_L;
- info->sps[1].enc[0].page_shift = 24;
- info->sps[1].enc[0].pte_enc = 0;
- } else {
- int i = 0;
-
- /* HV KVM has backing store size restrictions */
- info->flags = KVM_PPC_PAGE_SIZES_REAL;
-
- if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
- info->flags |= KVM_PPC_1T_SEGMENTS;
- }
-
- if (env->mmu_model == POWERPC_MMU_2_06 ||
- env->mmu_model == POWERPC_MMU_2_07) {
- info->slb_size = 32;
- } else {
- info->slb_size = 64;
- }
+ int ret;
- /* Standard 4k base page size segment */
- info->sps[i].page_shift = 12;
- info->sps[i].slb_enc = 0;
- info->sps[i].enc[0].page_shift = 12;
- info->sps[i].enc[0].pte_enc = 0;
- i++;
-
- /* 64K on MMU 2.06 and later */
- if (env->mmu_model == POWERPC_MMU_2_06 ||
- env->mmu_model == POWERPC_MMU_2_07) {
- info->sps[i].page_shift = 16;
- info->sps[i].slb_enc = 0x110;
- info->sps[i].enc[0].page_shift = 16;
- info->sps[i].enc[0].pte_enc = 1;
- i++;
- }
+ assert(kvm_state != NULL);
- /* Standard 16M large page size segment */
- info->sps[i].page_shift = 24;
- info->sps[i].slb_enc = SLB_VSID_L;
- info->sps[i].enc[0].page_shift = 24;
- info->sps[i].enc[0].pte_enc = 0;
+ if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+ error_setg(errp, "KVM doesn't expose the MMU features it supports");
+ error_append_hint(errp, "Consider switching to a newer KVM\n");
+ return;
}
-}
-
-static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
-{
- CPUState *cs = CPU(cpu);
- int ret;
- if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
- ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
- if (ret == 0) {
- return;
- }
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+ if (ret == 0) {
+ return;
}
- kvm_get_fallback_smmu_info(cpu, info);
+ error_setg_errno(errp, -ret,
+ "KVM failed to provide the MMU features it supports");
}
struct ppc_radix_page_info *kvm_get_radix_page_info(void)
@@ -408,14 +326,13 @@ target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
- PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
static struct kvm_ppc_smmu_info smmu_info;
if (!kvm_enabled()) {
return false;
}
- kvm_get_smmu_info(cpu, &smmu_info);
+ kvm_get_smmu_info(&smmu_info, &error_fatal);
return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
}
@@ -423,13 +340,18 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
struct kvm_ppc_smmu_info smmu_info;
int iq, ik, jq, jk;
+ Error *local_err = NULL;
/* For now, we only have anything to check on hash64 MMUs */
if (!cpu->hash64_opts || !kvm_enabled()) {
return;
}
- kvm_get_smmu_info(cpu, &smmu_info);
+ kvm_get_smmu_info(&smmu_info, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
&& !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
@@ -2168,7 +2090,7 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
/* Find the largest hardware supported page size that's less than
* or equal to the (logical) backing page size of guest RAM */
- kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
+ kvm_get_smmu_info(&info, &error_fatal);
rampagesize = qemu_getrampagesize();
best_page_shift = 0;
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index a34e604db3..8f0d86d104 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -21,9 +21,9 @@
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
+#include "tcg.h"
#include "internal.h"
//#define DEBUG_OP
@@ -215,6 +215,76 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
return i;
}
+#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
+uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
+ uint32_t opidx)
+{
+ Int128 ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
+ env->retxh = int128_gethi(ret);
+ return int128_getlo(ret);
+}
+
+uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
+ uint32_t opidx)
+{
+ Int128 ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
+ env->retxh = int128_gethi(ret);
+ return int128_getlo(ret);
+}
+
+void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t lo, uint64_t hi, uint32_t opidx)
+{
+ Int128 val = int128_make128(lo, hi);
+ helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
+}
+
+void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t lo, uint64_t hi, uint32_t opidx)
+{
+ Int128 val = int128_make128(lo, hi);
+ helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
+}
+
+uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t new_lo, uint64_t new_hi,
+ uint32_t opidx)
+{
+ bool success = false;
+
+ if (likely(addr == env->reserve_addr)) {
+ Int128 oldv, cmpv, newv;
+
+ cmpv = int128_make128(env->reserve_val2, env->reserve_val);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
+ success = int128_eq(oldv, cmpv);
+ }
+ env->reserve_addr = -1;
+ return env->so + success * CRF_EQ;
+}
+
+uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t new_lo, uint64_t new_hi,
+ uint32_t opidx)
+{
+ bool success = false;
+
+ if (likely(addr == env->reserve_addr)) {
+ Int128 oldv, cmpv, newv;
+
+ cmpv = int128_make128(env->reserve_val2, env->reserve_val);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
+ success = int128_eq(oldv, cmpv);
+ }
+ env->reserve_addr = -1;
+ return env->so + success * CRF_EQ;
+}
+#endif
+
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
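
As an illustration (this sketch is not part of the diff above): the success
test in helper_stqcx_*_parallel hinges on how the two reservation words are
paired. reserve_val holds the most significant doubleword and reserve_val2
the least significant one, matching int128_make128(lo, hi). Rewritten with a
plain struct instead of QEMU's Int128 type, the check amounts to:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;   /* stand-in for Int128 */

    static bool u128_eq(u128 a, u128 b)
    {
        return a.lo == b.lo && a.hi == b.hi;
    }

    /* Does the current 16-byte memory content still match the reservation? */
    static bool stqcx_reservation_matches(uint64_t reserve_val,   /* high */
                                          uint64_t reserve_val2,  /* low  */
                                          u128 current)
    {
        u128 cmpv = { .lo = reserve_val2, .hi = reserve_val };
        return u128_eq(cmpv, current);
    }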
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 3a215a1dc6..9eaa10b421 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -2388,23 +2388,6 @@ static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
}
}
-static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
-{
- TCGLabel *l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1, t2;
- tcg_gen_andi_tl(t0, EA, mask);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
- t2 = tcg_const_i32(ctx->opcode & 0x03FF0000);
- gen_update_nip(ctx, ctx->base.pc_next - 4);
- gen_helper_raise_exception_err(cpu_env, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- gen_set_label(l1);
- tcg_temp_free(t0);
-}
-
static inline void gen_align_no_le(DisasContext *ctx)
{
gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
@@ -2607,7 +2590,7 @@ static void gen_ld(DisasContext *ctx)
static void gen_lq(DisasContext *ctx)
{
int ra, rd;
- TCGv EA;
+ TCGv EA, hi, lo;
/* lq is a legal user mode instruction starting in ISA 2.07 */
bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
@@ -2633,16 +2616,35 @@ static void gen_lq(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
- /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
- necessary 64-bit byteswap already. */
- if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
+ /* Note that the low part is always in RD+1, even in LE mode. */
+ lo = cpu_gpr[rd + 1];
+ hi = cpu_gpr[rd];
+
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+#ifdef CONFIG_ATOMIC128
+ TCGv_i32 oi = tcg_temp_new_i32();
+ if (ctx->le_mode) {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+ gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+ } else {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+ gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+ }
+ tcg_temp_free_i32(oi);
+ tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
+#else
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+#endif
+ } else if (ctx->le_mode) {
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
- gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
}
@@ -2741,6 +2743,7 @@ static void gen_std(DisasContext *ctx)
if ((ctx->opcode & 0x3) == 0x2) { /* stq */
bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
+ TCGv hi, lo;
if (!(ctx->insns_flags & PPC_64BX)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
@@ -2764,20 +2767,38 @@ static void gen_std(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- /* We only need to swap high and low halves. gen_qemu_st64_i64 does
- necessary 64-bit byteswap already. */
- if (unlikely(ctx->le_mode)) {
- gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
+ /* Note that the low part is always in RS+1, even in LE mode. */
+ lo = cpu_gpr[rs + 1];
+ hi = cpu_gpr[rs];
+
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+#ifdef CONFIG_ATOMIC128
+ TCGv_i32 oi = tcg_temp_new_i32();
+ if (ctx->le_mode) {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+ gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
+ } else {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+ gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
+ }
+ tcg_temp_free_i32(oi);
+#else
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+#endif
+ } else if (ctx->le_mode) {
+ tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
+ tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
- gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
+ tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
+ tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
} else {
- /* std / stdu*/
+ /* std / stdu */
if (Rc(ctx->opcode)) {
if (unlikely(rA(ctx->opcode) == 0)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
@@ -3032,23 +3053,24 @@ static void gen_isync(DisasContext *ctx)
#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
-#define LARX(name, memop) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv t0; \
- TCGv gpr = cpu_gpr[rD(ctx->opcode)]; \
- int len = MEMOP_GET_SIZE(memop); \
- gen_set_access_type(ctx, ACCESS_RES); \
- t0 = tcg_temp_local_new(); \
- gen_addr_reg_index(ctx, t0); \
- if ((len) > 1) { \
- gen_check_align(ctx, t0, (len)-1); \
- } \
- tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \
- tcg_gen_mov_tl(cpu_reserve, t0); \
- tcg_gen_mov_tl(cpu_reserve_val, gpr); \
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); \
- tcg_temp_free(t0); \
+static void gen_load_locked(DisasContext *ctx, TCGMemOp memop)
+{
+ TCGv gpr = cpu_gpr[rD(ctx->opcode)];
+ TCGv t0 = tcg_temp_new();
+
+ gen_set_access_type(ctx, ACCESS_RES);
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_gen_mov_tl(cpu_reserve_val, gpr);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ tcg_temp_free(t0);
+}
+
+#define LARX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ gen_load_locked(ctx, memop); \
}
/* lwarx */
@@ -3056,134 +3078,239 @@ LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
-#define LD_ATOMIC(name, memop, tp, op, eop) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- int len = MEMOP_GET_SIZE(memop); \
- uint32_t gpr_FC = FC(ctx->opcode); \
- TCGv EA = tcg_temp_local_new(); \
- TCGv_##tp t0, t1; \
- \
- gen_addr_register(ctx, EA); \
- if (len > 1) { \
- gen_check_align(ctx, EA, len - 1); \
- } \
- t0 = tcg_temp_new_##tp(); \
- t1 = tcg_temp_new_##tp(); \
- tcg_gen_##op(t0, cpu_gpr[rD(ctx->opcode) + 1]); \
- \
- switch (gpr_FC) { \
- case 0: /* Fetch and add */ \
- tcg_gen_atomic_fetch_add_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 1: /* Fetch and xor */ \
- tcg_gen_atomic_fetch_xor_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 2: /* Fetch and or */ \
- tcg_gen_atomic_fetch_or_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 3: /* Fetch and 'and' */ \
- tcg_gen_atomic_fetch_and_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 8: /* Swap */ \
- tcg_gen_atomic_xchg_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 4: /* Fetch and max unsigned */ \
- case 5: /* Fetch and max signed */ \
- case 6: /* Fetch and min unsigned */ \
- case 7: /* Fetch and min signed */ \
- case 16: /* compare and swap not equal */ \
- case 24: /* Fetch and increment bounded */ \
- case 25: /* Fetch and increment equal */ \
- case 28: /* Fetch and decrement bounded */ \
- gen_invalid(ctx); \
- break; \
- default: \
- /* invoke data storage error handler */ \
- gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); \
- } \
- tcg_gen_##eop(cpu_gpr[rD(ctx->opcode)], t1); \
- tcg_temp_free_##tp(t0); \
- tcg_temp_free_##tp(t1); \
- tcg_temp_free(EA); \
-}
-
-LD_ATOMIC(lwat, DEF_MEMOP(MO_UL), i32, trunc_tl_i32, extu_i32_tl)
-#if defined(TARGET_PPC64)
-LD_ATOMIC(ldat, DEF_MEMOP(MO_Q), i64, mov_i64, mov_i64)
-#endif
+static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop,
+ TCGv EA, TCGCond cond, int addend)
+{
+ TCGv t = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv u = tcg_temp_new();
-#define ST_ATOMIC(name, memop, tp, op) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- int len = MEMOP_GET_SIZE(memop); \
- uint32_t gpr_FC = FC(ctx->opcode); \
- TCGv EA = tcg_temp_local_new(); \
- TCGv_##tp t0, t1; \
- \
- gen_addr_register(ctx, EA); \
- if (len > 1) { \
- gen_check_align(ctx, EA, len - 1); \
- } \
- t0 = tcg_temp_new_##tp(); \
- t1 = tcg_temp_new_##tp(); \
- tcg_gen_##op(t0, cpu_gpr[rD(ctx->opcode) + 1]); \
- \
- switch (gpr_FC) { \
- case 0: /* add and Store */ \
- tcg_gen_atomic_add_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 1: /* xor and Store */ \
- tcg_gen_atomic_xor_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 2: /* Or and Store */ \
- tcg_gen_atomic_or_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 3: /* 'and' and Store */ \
- tcg_gen_atomic_and_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
- break; \
- case 4: /* Store max unsigned */ \
- case 5: /* Store max signed */ \
- case 6: /* Store min unsigned */ \
- case 7: /* Store min signed */ \
- case 24: /* Store twin */ \
- gen_invalid(ctx); \
- break; \
- default: \
- /* invoke data storage error handler */ \
- gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); \
- } \
- tcg_temp_free_##tp(t0); \
- tcg_temp_free_##tp(t1); \
- tcg_temp_free(EA); \
-}
-
-ST_ATOMIC(stwat, DEF_MEMOP(MO_UL), i32, trunc_tl_i32)
-#if defined(TARGET_PPC64)
-ST_ATOMIC(stdat, DEF_MEMOP(MO_Q), i64, mov_i64)
+ tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
+ tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
+ tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
+ tcg_gen_addi_tl(u, t, addend);
+
+ /* E.g. for fetch and increment bounded... */
+ /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
+ tcg_gen_movcond_tl(cond, u, t, t2, u, t);
+ tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
+
+ /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
+ tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
+ tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
+
+ tcg_temp_free(t);
+ tcg_temp_free(t2);
+ tcg_temp_free(u);
+}
+
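
A plain-C reading of what gen_fetch_inc_conditional emits for the FC=24
"fetch and increment bounded" case (cond = TCG_COND_NE, addend = +1, 32-bit
lwat), as an illustration only; the function name is hypothetical, and the
generated code additionally stores the unmodified value back when the bound
is reached, which the sketch skips as it is semantically equivalent:

    /* FC=24, fetch and increment bounded, 32-bit case */
    uint32_t fetch_inc_bounded_32(uint32_t *ea)
    {
        uint32_t t  = ea[0];          /* word at EA      */
        uint32_t t2 = ea[1];          /* bound at EA + 4 */

        if (t != t2) {
            ea[0] = t + 1;            /* below the bound: increment in place */
            return t;                 /* RT gets the old value               */
        }
        return 1u << 31;              /* bound reached: RT = 2^(s*8-1)       */
    }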
+static void gen_ld_atomic(DisasContext *ctx, TCGMemOp memop)
+{
+ uint32_t gpr_FC = FC(ctx->opcode);
+ TCGv EA = tcg_temp_new();
+ int rt = rD(ctx->opcode);
+ bool need_serial;
+ TCGv src, dst;
+
+ gen_addr_register(ctx, EA);
+ dst = cpu_gpr[rt];
+ src = cpu_gpr[(rt + 1) & 31];
+
+ need_serial = false;
+ memop |= MO_ALIGN;
+ switch (gpr_FC) {
+ case 0: /* Fetch and add */
+ tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 1: /* Fetch and xor */
+ tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 2: /* Fetch and or */
+ tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 3: /* Fetch and 'and' */
+ tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 4: /* Fetch and max unsigned */
+ tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 5: /* Fetch and max signed */
+ tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 6: /* Fetch and min unsigned */
+ tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 7: /* Fetch and min signed */
+ tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 8: /* Swap */
+ tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+
+ case 16: /* Compare and swap not equal */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
+ if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
+ tcg_gen_mov_tl(t1, src);
+ } else {
+ tcg_gen_ext32u_tl(t1, src);
+ }
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
+ cpu_gpr[(rt + 2) & 31], t0);
+ tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
+ tcg_gen_mov_tl(dst, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+
+ case 24: /* Fetch and increment bounded */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
+ }
+ break;
+ case 25: /* Fetch and increment equal */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
+ }
+ break;
+ case 28: /* Fetch and decrement bounded */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
+ }
+ break;
+
+ default:
+ /* invoke data storage error handler */
+ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
+ }
+ tcg_temp_free(EA);
+
+ if (need_serial) {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+}
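
The FC=16 "compare and swap not equal" branch above, in its non-parallel
form, boils down to the following plain-C sketch (illustration only, 64-bit
ldat case; names and parameters are hypothetical): the swap happens only
when memory does not match RT+1, and RT always receives the old value.

    /* FC=16, compare and swap not equal, 64-bit case */
    uint64_t csne_64(uint64_t *ea, uint64_t rt1 /* RT+1 */, uint64_t rt2 /* RT+2 */)
    {
        uint64_t old = *ea;

        *ea = (old != rt1) ? rt2 : old;   /* swap in RT+2 only on mismatch */
        return old;                       /* RT receives the old memory value */
    }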
+
+static void gen_lwat(DisasContext *ctx)
+{
+ gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
+}
+
+#ifdef TARGET_PPC64
+static void gen_ldat(DisasContext *ctx)
+{
+ gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
+}
#endif
-#if defined(CONFIG_USER_ONLY)
-static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int memop)
+static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop)
{
- TCGv t0 = tcg_temp_new();
+ uint32_t gpr_FC = FC(ctx->opcode);
+ TCGv EA = tcg_temp_new();
+ TCGv src, discard;
- tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
- tcg_gen_movi_tl(t0, (MEMOP_GET_SIZE(memop) << 5) | reg);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
- tcg_temp_free(t0);
- gen_exception_err(ctx, POWERPC_EXCP_STCX, 0);
+ gen_addr_register(ctx, EA);
+ src = cpu_gpr[rD(ctx->opcode)];
+ discard = tcg_temp_new();
+
+ memop |= MO_ALIGN;
+ switch (gpr_FC) {
+ case 0: /* add and Store */
+ tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 1: /* xor and Store */
+ tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 2: /* Or and Store */
+ tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 3: /* 'and' and Store */
+ tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 4: /* Store max unsigned */
+ tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 5: /* Store max signed */
+ tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 6: /* Store min unsigned */
+ tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 7: /* Store min signed */
+ tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 24: /* Store twin */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ } else {
+ TCGv t = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv s = tcg_temp_new();
+ TCGv s2 = tcg_temp_new();
+ TCGv ea_plus_s = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
+ tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
+ tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
+ tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
+ tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
+ tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
+ tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
+
+ tcg_temp_free(ea_plus_s);
+ tcg_temp_free(s2);
+ tcg_temp_free(s);
+ tcg_temp_free(t2);
+ tcg_temp_free(t);
+ }
+ break;
+ default:
+ /* invoke data storage error handler */
+ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
+ }
+ tcg_temp_free(discard);
+ tcg_temp_free(EA);
}
-#else
-static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int memop)
+
+static void gen_stwat(DisasContext *ctx)
+{
+ gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
+}
+
+#ifdef TARGET_PPC64
+static void gen_stdat(DisasContext *ctx)
+{
+ gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
+}
+#endif
+
+static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
- TCGv t0;
+ TCGv t0 = tcg_temp_new();
+ int reg = rS(ctx->opcode);
- tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
+ gen_set_access_type(ctx, ACCESS_RES);
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_temp_free(t0);
t0 = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
@@ -3206,21 +3333,11 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
gen_set_label(l2);
tcg_gen_movi_tl(cpu_reserve, -1);
}
-#endif
-#define STCX(name, memop) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv t0; \
- int len = MEMOP_GET_SIZE(memop); \
- gen_set_access_type(ctx, ACCESS_RES); \
- t0 = tcg_temp_local_new(); \
- gen_addr_reg_index(ctx, t0); \
- if (len > 1) { \
- gen_check_align(ctx, t0, (len) - 1); \
- } \
- gen_conditional_store(ctx, t0, rS(ctx->opcode), memop); \
- tcg_temp_free(t0); \
+#define STCX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ gen_conditional_store(ctx, memop); \
}
STCX(stbcx_, DEF_MEMOP(MO_UB))
@@ -3236,9 +3353,8 @@ STCX(stdcx_, DEF_MEMOP(MO_Q))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
{
- TCGv EA;
int rd = rD(ctx->opcode);
- TCGv gpr1, gpr2;
+ TCGv EA, hi, lo;
if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
(rd == rB(ctx->opcode)))) {
@@ -3247,73 +3363,125 @@ static void gen_lqarx(DisasContext *ctx)
}
gen_set_access_type(ctx, ACCESS_RES);
- EA = tcg_temp_local_new();
+ EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_check_align(ctx, EA, 15);
- if (unlikely(ctx->le_mode)) {
- gpr1 = cpu_gpr[rd+1];
- gpr2 = cpu_gpr[rd];
+
+ /* Note that the low part is always in RD+1, even in LE mode. */
+ lo = cpu_gpr[rd + 1];
+ hi = cpu_gpr[rd];
+
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+#ifdef CONFIG_ATOMIC128
+ TCGv_i32 oi = tcg_temp_new_i32();
+ if (ctx->le_mode) {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
+ ctx->mem_idx));
+ gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+ } else {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
+ ctx->mem_idx));
+ gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+ }
+ tcg_temp_free_i32(oi);
+ tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
+#else
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ tcg_temp_free(EA);
+ return;
+#endif
+ } else if (ctx->le_mode) {
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
+ tcg_gen_mov_tl(cpu_reserve, EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
- gpr1 = cpu_gpr[rd];
- gpr2 = cpu_gpr[rd+1];
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
+ tcg_gen_mov_tl(cpu_reserve, EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
- tcg_gen_qemu_ld_i64(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
- tcg_gen_mov_tl(cpu_reserve, EA);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
-
- tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
- tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
tcg_temp_free(EA);
+
+ tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
+ tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
/* stqcx. */
static void gen_stqcx_(DisasContext *ctx)
{
- TCGv EA;
- int reg = rS(ctx->opcode);
- int len = 16;
-#if !defined(CONFIG_USER_ONLY)
- TCGLabel *l1;
- TCGv gpr1, gpr2;
-#endif
+ int rs = rS(ctx->opcode);
+ TCGv EA, hi, lo;
- if (unlikely((rD(ctx->opcode) & 1))) {
+ if (unlikely(rs & 1)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
+
gen_set_access_type(ctx, ACCESS_RES);
- EA = tcg_temp_local_new();
+ EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- if (len > 1) {
- gen_check_align(ctx, EA, (len) - 1);
- }
-#if defined(CONFIG_USER_ONLY)
- gen_conditional_store(ctx, EA, reg, 16);
-#else
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
- l1 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
- tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
+ /* Note that the low part is always in RS+1, even in LE mode. */
+ lo = cpu_gpr[rs + 1];
+ hi = cpu_gpr[rs];
- if (unlikely(ctx->le_mode)) {
- gpr1 = cpu_gpr[reg + 1];
- gpr2 = cpu_gpr[reg];
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
+#ifdef CONFIG_ATOMIC128
+ if (ctx->le_mode) {
+ gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
+ } else {
+ gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
+ }
+#else
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+#endif
+ tcg_temp_free(EA);
+ tcg_temp_free_i32(oi);
} else {
- gpr1 = cpu_gpr[reg];
- gpr2 = cpu_gpr[reg + 1];
- }
- tcg_gen_qemu_st_tl(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_st_tl(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ TCGLabel *lab_fail = gen_new_label();
+ TCGLabel *lab_over = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_reserve, -1);
-#endif
- tcg_temp_free(EA);
-}
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
+ tcg_temp_free(EA);
+ gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
+ tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
+ ? offsetof(CPUPPCState, reserve_val2)
+ : offsetof(CPUPPCState, reserve_val)));
+ tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+
+ tcg_gen_addi_i64(t0, cpu_reserve, 8);
+ gen_qemu_ld64_i64(ctx, t0, t0);
+ tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
+ ? offsetof(CPUPPCState, reserve_val)
+ : offsetof(CPUPPCState, reserve_val2)));
+ tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+
+ /* Success */
+ gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
+ tcg_gen_addi_i64(t0, cpu_reserve, 8);
+ gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);
+
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
+ tcg_gen_br(lab_over);
+
+ gen_set_label(lab_fail);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+
+ gen_set_label(lab_over);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
#endif /* defined(TARGET_PPC64) */
/* sync */
@@ -4636,8 +4804,8 @@ static void gen_eciwx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_EXT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_check_align(ctx, t0, 0x03);
- gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
+ DEF_MEMOP(MO_UL | MO_ALIGN));
tcg_temp_free(t0);
}
@@ -4649,8 +4817,8 @@ static void gen_ecowx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_EXT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_check_align(ctx, t0, 0x03);
- gen_qemu_st32(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
+ DEF_MEMOP(MO_UL | MO_ALIGN));
tcg_temp_free(t0);
}
@@ -6886,7 +7054,7 @@ GEN_HANDLER(stop##u, opc, 0xFF, 0xFF, 0x00000000, type),
#define GEN_STUX(name, stop, opc2, opc3, type) \
GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type),
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
-GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
+GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
#define GEN_STS(name, stop, op, type) \
GEN_ST(name, stop, op | 0x20, type) \
GEN_STU(name, stop, op | 0x21, type) \
@@ -7314,6 +7482,7 @@ static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
DisasContext *ctx = container_of(dcbase, DisasContext, base);
gen_debug_exception(ctx);
+ dcbase->is_jmp = DISAS_NORETURN;
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c
index 76d6f3fd5e..7813b1b004 100644
--- a/target/ppc/translate_init.inc.c
+++ b/target/ppc/translate_init.inc.c
@@ -10457,6 +10457,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = ppc_cpu_set_pc;
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
+ cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault;
#else