-rw-r--r--  hw/intc/pnv_xive2.c                | 24
-rw-r--r--  hw/intc/pnv_xive2_regs.h           |  8
-rw-r--r--  hw/intc/xive.c                     | 16
-rw-r--r--  hw/ppc/Kconfig                     |  1
-rw-r--r--  hw/ppc/ppc.c                       | 11
-rw-r--r--  include/hw/ppc/openpic.h           |  2
-rw-r--r--  include/hw/ppc/xive_regs.h         | 16
-rw-r--r--  target/ppc/cpu.h                   | 19
-rw-r--r--  target/ppc/cpu_init.c              | 24
-rw-r--r--  target/ppc/excp_helper.c           | 14
-rw-r--r--  target/ppc/helper_regs.c           | 73
-rw-r--r--  target/ppc/helper_regs.h           |  1
-rw-r--r--  target/ppc/machine.c               |  8
-rw-r--r--  target/ppc/mmu_common.c            | 91
-rw-r--r--  target/ppc/mmu_helper.c            | 32
-rw-r--r--  target/ppc/power8-pmu.c            | 60
-rw-r--r--  target/ppc/power8-pmu.h            |  4
-rw-r--r--  target/ppc/translate.c             | 80
-rw-r--r--  tests/avocado/tuxrun_baselines.py  |  1
19 files changed, 289 insertions, 196 deletions
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index 7176d70234..ec1edeb385 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -163,7 +163,9 @@ static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
if (!(vsd & VSD_ADDRESS_MASK)) {
+#ifdef XIVE2_DEBUG
xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
+#endif
return 0;
}
@@ -185,7 +187,9 @@ static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
MEMTXATTRS_UNSPECIFIED);
if (!(vsd & VSD_ADDRESS_MASK)) {
+#ifdef XIVE2_DEBUG
xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
+#endif
return 0;
}
@@ -955,6 +959,10 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
val = xive->vc_regs[reg];
break;
+ case VC_ESBC_CFG:
+ val = xive->vc_regs[reg];
+ break;
+
/*
* EAS cache updates (not modeled)
*/
@@ -1046,6 +1054,9 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
/* ESB update */
break;
+ case VC_ESBC_CFG:
+ break;
+
/*
* EAS cache updates (not modeled)
*/
@@ -1265,6 +1276,9 @@ static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
case TCTXT_EN1_RESET:
val = xive->tctxt_regs[TCTXT_EN1 >> 3];
break;
+ case TCTXT_CFG:
+ val = xive->tctxt_regs[reg];
+ break;
default:
xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
}
@@ -1276,6 +1290,7 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ uint32_t reg = offset >> 3;
switch (offset) {
/*
@@ -1283,6 +1298,7 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
*/
case TCTXT_EN0: /* Physical Thread Enable */
case TCTXT_EN1: /* Physical Thread Enable (fused core) */
+ xive->tctxt_regs[reg] = val;
break;
case TCTXT_EN0_SET:
@@ -1297,7 +1313,9 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
case TCTXT_EN1_RESET:
xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
break;
-
+ case TCTXT_CFG:
+ xive->tctxt_regs[reg] = val;
+ break;
default:
xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
return;
@@ -1648,6 +1666,8 @@ static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
bool gen1_tima_os =
xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
+ offset &= TM_ADDRESS_MASK;
+
/* TODO: should we switch the TM ops table instead ? */
if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) {
xive2_tm_push_os_ctx(xptr, tctx, offset, value, size);
@@ -1667,6 +1687,8 @@ static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
bool gen1_tima_os =
xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
+ offset &= TM_ADDRESS_MASK;
+
/* TODO: should we switch the TM ops table instead ? */
if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) {
return xive2_tm_pull_os_ctx(xptr, tctx, offset, size);
diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h
index 0c096e4adb..7165dc8704 100644
--- a/hw/intc/pnv_xive2_regs.h
+++ b/hw/intc/pnv_xive2_regs.h
@@ -232,6 +232,10 @@
#define VC_ESBC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
#define VC_ESBC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
+/* ESBC configuration */
+#define X_VC_ESBC_CFG 0x148
+#define VC_ESBC_CFG 0x240
+
/* EASC flush control register */
#define X_VC_EASC_FLUSH_CTRL 0x160
#define VC_EASC_FLUSH_CTRL 0x300
@@ -405,6 +409,10 @@
#define X_TCTXT_EN1_RESET 0x307
#define TCTXT_EN1_RESET 0x038
+/* TCTXT Config register */
+#define X_TCTXT_CFG 0x328
+#define TCTXT_CFG 0x140
+
/*
* VSD Tables
*/
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index a986b96843..5204c14b87 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -249,7 +249,7 @@ static const uint8_t *xive_tm_views[] = {
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
- uint8_t reg_offset = offset & 0x3F;
+ uint8_t reg_offset = offset & TM_REG_OFFSET;
uint8_t reg_mask = write ? 0x1 : 0x2;
uint64_t mask = 0x0;
int i;
@@ -266,8 +266,8 @@ static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
unsigned size)
{
- uint8_t ring_offset = offset & 0x30;
- uint8_t reg_offset = offset & 0x3F;
+ uint8_t ring_offset = offset & TM_RING_OFFSET;
+ uint8_t reg_offset = offset & TM_REG_OFFSET;
uint64_t mask = xive_tm_mask(offset, size, true);
int i;
@@ -296,8 +296,8 @@ static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
- uint8_t ring_offset = offset & 0x30;
- uint8_t reg_offset = offset & 0x3F;
+ uint8_t ring_offset = offset & TM_RING_OFFSET;
+ uint8_t reg_offset = offset & TM_REG_OFFSET;
uint64_t mask = xive_tm_mask(offset, size, false);
uint64_t ret;
int i;
@@ -500,7 +500,7 @@ static const XiveTmOp xive_tm_operations[] = {
static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
- uint32_t op_offset = offset & 0xFFF;
+ uint32_t op_offset = offset & TM_ADDRESS_MASK;
int i;
for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
@@ -534,7 +534,7 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
/*
* First, check for special operations in the 2K region
*/
- if (offset & 0x800) {
+ if (offset & TM_SPECIAL_OP) {
xto = xive_tm_find_op(offset, size, true);
if (!xto) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
@@ -573,7 +573,7 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
/*
* First, check for special operations in the 2K region
*/
- if (offset & 0x800) {
+ if (offset & TM_SPECIAL_OP) {
xto = xive_tm_find_op(offset, size, false);
if (!xto) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig
index a689d9b219..5dfbf47ef5 100644
--- a/hw/ppc/Kconfig
+++ b/hw/ppc/Kconfig
@@ -115,6 +115,7 @@ config MAC_NEWWORLD
select MAC_PMU
select UNIN_PCI
select FW_CFG_PPC
+ select USB_OHCI_PCI
config E500
bool
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index 4e816c68c7..1b1220c423 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -798,6 +798,8 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
int64_t signed_decr;
/* Truncate value to decr_width and sign extend for simplicity */
+ value = extract64(value, 0, nr_bits);
+ decr = extract64(decr, 0, nr_bits);
signed_value = sextract64(value, 0, nr_bits);
signed_decr = sextract64(decr, 0, nr_bits);
@@ -809,11 +811,7 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
}
/*
- * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
- * interrupt.
- *
- * If we get a really small DEC value, we can assume that by the time we
- * handled it we should inject an interrupt already.
+ * Going from 1 -> 0 or 0 -> -1 is the event to generate a DEC interrupt.
*
* On MSB level based DEC implementations the MSB always means the interrupt
* is pending, so raise it on those.
@@ -821,8 +819,7 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
* On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
* an edge interrupt, so raise it here too.
*/
- if ((value < 3) ||
- ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
+ if (((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
&& signed_decr >= 0)) {
(*raise_excp)(cpu);
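
Aside, not part of the patch above: a minimal, self-contained sketch of the decrementer store logic that __cpu_ppc_store_decr() now uses. sextract64() is open-coded and the flag names are local stand-ins for PPC_DECR_UNDERFLOW_LEVEL/TRIGGERED, so the example compiles on its own.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEC_UNDERFLOW_LEVEL     (1 << 0)  /* MSB level-based DEC */
#define DEC_UNDERFLOW_TRIGGERED (1 << 1)  /* MSB edge-based DEC */

/* Sign-extend the low nr_bits of v (same idea as QEMU's sextract64). */
static int64_t sext(uint64_t v, unsigned nr_bits)
{
    return (int64_t)(v << (64 - nr_bits)) >> (64 - nr_bits);
}

/* Should storing 'value' over the current 'decr' raise a DEC interrupt? */
static bool dec_store_raises(uint64_t decr, uint64_t value,
                             unsigned nr_bits, int flags)
{
    int64_t signed_value = sext(value, nr_bits);
    int64_t signed_decr  = sext(decr, nr_bits);

    /* Level-based DEC: a set MSB (negative value) means the interrupt
     * is pending, whatever the previous value was. */
    if ((flags & DEC_UNDERFLOW_LEVEL) && signed_value < 0) {
        return true;
    }
    /* Edge-based DEC: only the 0 -> 1 transition of the MSB triggers. */
    if ((flags & DEC_UNDERFLOW_TRIGGERED) &&
        signed_value < 0 && signed_decr >= 0) {
        return true;
    }
    return false;
}

int main(void)
{
    /* 32-bit edge-triggered DEC: going positive -> negative raises,
     * negative -> negative does not. */
    printf("%d\n", dec_store_raises(5, UINT32_MAX, 32, DEC_UNDERFLOW_TRIGGERED));
    printf("%d\n", dec_store_raises(UINT32_MAX - 15, UINT32_MAX, 32,
                                    DEC_UNDERFLOW_TRIGGERED));
    return 0;
}
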
diff --git a/include/hw/ppc/openpic.h b/include/hw/ppc/openpic.h
index ebdaf8a493..bae8dafe16 100644
--- a/include/hw/ppc/openpic.h
+++ b/include/hw/ppc/openpic.h
@@ -55,7 +55,7 @@ typedef enum IRQType {
* Round up to the nearest 64 IRQs so that the queue length
* won't change when moving between 32 and 64 bit hosts.
*/
-#define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63)
+#define IRQQUEUE_SIZE_BITS ROUND_UP(OPENPIC_MAX_IRQ, 64)
typedef struct IRQQueue {
unsigned long *queue;
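
Aside, not part of the patch: ROUND_UP(x, 64) and the open-coded ((x + 63) & ~63) it replaces agree whenever the alignment is a power of two, so the new definition is purely cosmetic. A tiny standalone check, using a local ROUND_UP macro with the usual semantics (not necessarily QEMU's exact definition) and made-up IRQ counts:

#include <assert.h>
#include <stdio.h>

/* Round n up to the next multiple of d; d must be a power of two here. */
#define ROUND_UP(n, d) (((n) + (d) - 1) & ~((d) - 1))

int main(void)
{
    int vals[] = { 1, 64, 80, 263 };   /* hypothetical OPENPIC_MAX_IRQ values */

    for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
        int n = vals[i];
        assert(ROUND_UP(n, 64) == ((n + 63) & ~63));
        printf("%3d -> %3d\n", n, ROUND_UP(n, 64));
    }
    return 0;
}
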
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index b7fde2354e..4a3c9badd3 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -48,6 +48,22 @@
#define TM_SHIFT 16
+/*
+ * TIMA addresses are 12-bits (4k page).
+ * The MSB indicates a special op with side effect, which can be
+ * refined with bit 10 (see below).
+ * The registers, logically grouped in 4 rings (a quad-word each), are
+ * defined on the 6 LSBs (offset below 0x40)
+ * In between, we can add a cache line index from 0...3 (ie, 0, 0x80,
+ * 0x100, 0x180) to select a specific snooper. Those 'snoop port
+ * address' bits should be dropped when processing the operations as
+ * they are all equivalent.
+ */
+#define TM_ADDRESS_MASK 0xC3F
+#define TM_SPECIAL_OP 0x800
+#define TM_RING_OFFSET 0x30
+#define TM_REG_OFFSET 0x3F
+
/* TM register offsets */
#define TM_QW0_USER 0x000 /* All rings */
#define TM_QW1_OS 0x010 /* Ring 0..2 */
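
Aside, not part of the patch: a standalone demo of how the new masks decompose a TIMA offset. The mask values come from the header above; the sample offsets are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define TM_ADDRESS_MASK 0xC3F   /* drops the snoop-port address bits */
#define TM_SPECIAL_OP   0x800   /* MSB: operation with side effects */
#define TM_RING_OFFSET  0x30    /* selects one of the 4 quad-word rings */
#define TM_REG_OFFSET   0x3F    /* register offset within the ring */

static void decode(uint32_t offset)
{
    uint32_t canonical = offset & TM_ADDRESS_MASK;

    printf("offset 0x%03x -> canonical 0x%03x, %s, ring 0x%02x, reg 0x%02x\n",
           offset, canonical,
           (canonical & TM_SPECIAL_OP) ? "special op" : "plain access",
           canonical & TM_RING_OFFSET, canonical & TM_REG_OFFSET);
}

int main(void)
{
    /* 0x038, 0x0B8, 0x138 and 0x1B8 differ only in the snoop-port bits,
     * so they all canonicalize to the same register offset 0x038. */
    decode(0x038);
    decode(0x0B8);
    decode(0x138);
    decode(0x1B8);
    decode(0x800);   /* special-op region */
    return 0;
}
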
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 10c4ffa148..0ee2adc105 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1114,8 +1114,9 @@ struct CPUArchState {
target_ulong ov32;
target_ulong ca32;
- target_ulong reserve_addr; /* Reservation address */
- target_ulong reserve_val; /* Reservation value */
+ target_ulong reserve_addr; /* Reservation address */
+ target_ulong reserve_length; /* Reservation larx op size (bytes) */
+ target_ulong reserve_val; /* Reservation value */
target_ulong reserve_val2;
/* These are used in supervisor mode only */
@@ -1194,6 +1195,7 @@ struct CPUArchState {
int error_code;
uint32_t pending_interrupts;
#if !defined(CONFIG_USER_ONLY)
+ uint64_t excp_stats[POWERPC_EXCP_NB];
/*
* This is the IRQ controller, which is implementation dependent and only
* relevant when emulating a complete machine. Note that this isn't used
@@ -1424,15 +1426,10 @@ void store_booke_tsr(CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all(CPUPPCState *env);
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr);
void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp);
-int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
- hwaddr *raddrp, target_ulong address,
- uint32_t pid);
-int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
- hwaddr *raddrp,
- target_ulong address, uint32_t pid, int ext,
- int i);
-hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
- ppcmas_tlb_t *tlb);
+int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp,
+ target_ulong address, uint32_t pid);
+int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid);
+hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb);
#endif
void ppc_store_fpscr(CPUPPCState *env, target_ulong val);
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 05bf73296b..9f97222655 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -48,6 +48,7 @@
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
+#include "hw/intc/intc.h"
#endif
/* #define PPC_DEBUG_SPR */
@@ -7083,7 +7084,7 @@ static void ppc_cpu_reset_hold(Object *obj)
if (env->mmu_model != POWERPC_MMU_REAL) {
ppc_tlb_invalidate_all(env);
}
- pmu_update_summaries(env);
+ pmu_mmcr01_updated(env);
}
/* clean any pending stop state */
@@ -7123,6 +7124,16 @@ static bool ppc_cpu_is_big_endian(CPUState *cs)
return !FIELD_EX64(env->msr, MSR, LE);
}
+static bool ppc_get_irq_stats(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs)
+{
+ CPUPPCState *env = &POWERPC_CPU(obj)->env;
+
+ *irq_counts = env->excp_stats;
+ *nb_irqs = ARRAY_SIZE(env->excp_stats);
+ return true;
+}
+
#ifdef CONFIG_TCG
static void ppc_cpu_exec_enter(CPUState *cs)
{
@@ -7286,6 +7297,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = ppc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &ppc_sysemu_ops;
+ INTERRUPT_STATS_PROVIDER_CLASS(oc)->get_statistics = ppc_get_irq_stats;
#endif
cc->gdb_num_core_regs = 71;
@@ -7323,6 +7335,12 @@ static const TypeInfo ppc_cpu_type_info = {
.abstract = true,
.class_size = sizeof(PowerPCCPUClass),
.class_init = ppc_cpu_class_init,
+#ifndef CONFIG_USER_ONLY
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
+#endif
};
#ifndef CONFIG_USER_ONLY
@@ -7392,8 +7410,8 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' ');
}
- qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n",
- env->reserve_addr);
+ qemu_fprintf(f, " ] RES %03x@" TARGET_FMT_lx "\n",
+ (int)env->reserve_length, env->reserve_addr);
if (flags & CPU_DUMP_FPU) {
for (i = 0; i < 32; i++) {
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index fea9221501..12d8a7257b 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -1358,9 +1358,12 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
/*
* We don't want to generate a Hypervisor Emulation Assistance
- * Interrupt if we don't have HVB in msr_mask (PAPR mode).
+ * Interrupt if we don't have HVB in msr_mask (PAPR mode),
+ * unless running a nested-hv guest, in which case the L1
+ * kernel wants the interrupt.
*/
- if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB)) {
+ if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) &&
+ !books_vhyp_handles_hv_excp(cpu)) {
excp = POWERPC_EXCP_PROGRAM;
}
@@ -1539,6 +1542,8 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_DSEG: /* Data segment exception */
case POWERPC_EXCP_ISEG: /* Instruction segment exception */
case POWERPC_EXCP_TRACE: /* Trace exception */
+ case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */
+ case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */
break;
case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
msr |= env->error_code;
@@ -1581,10 +1586,8 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
*/
return;
case POWERPC_EXCP_THERM: /* Thermal interrupt */
- case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
case POWERPC_EXCP_VPUA: /* Vector assist exception */
case POWERPC_EXCP_MAINT: /* Maintenance exception */
- case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */
case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */
cpu_abort(cs, "%s exception not implemented\n",
powerpc_excp_name(excp));
@@ -1652,6 +1655,7 @@ static void powerpc_excp(PowerPCCPU *cpu, int excp)
qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
" => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
excp, env->error_code);
+ env->excp_stats[excp]++;
switch (env->excp_model) {
case POWERPC_EXCP_40x:
@@ -3068,7 +3072,7 @@ void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
return;
}
- ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}
/*
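
Aside, not part of the patch: the new condition for demoting a Hypervisor Emulation Assistance interrupt reads naturally as a small decision function. This restatement uses simplified boolean parameters standing in for the real checks on env->msr_mask and books_vhyp_handles_hv_excp().

#include <stdbool.h>
#include <stdio.h>

enum { EXCP_PROGRAM, EXCP_HV_EMU };

/*
 * HV_EMU is only deliverable when the CPU model exposes HV state (HVB set
 * in msr_mask) or when a nested-hv virtual hypervisor (the L1 kernel) is
 * there to receive it; otherwise it is demoted to a Program interrupt.
 */
static int classify_hv_emu(bool hvb_in_msr_mask, bool vhyp_handles_hv_excp)
{
    if (!hvb_in_msr_mask && !vhyp_handles_hv_excp) {
        return EXCP_PROGRAM;
    }
    return EXCP_HV_EMU;
}

int main(void)
{
    printf("%d %d %d\n",
           classify_hv_emu(false, false),   /* PAPR guest, no nested-hv: 0 */
           classify_hv_emu(false, true),    /* nested-hv L1 wants it:    1 */
           classify_hv_emu(true, false));   /* HV-capable machine:       1 */
    return 0;
}
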
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index fb351c303f..bc7e9d7eda 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -47,6 +47,48 @@ void hreg_swap_gpr_tgpr(CPUPPCState *env)
env->tgpr[3] = tmp;
}
+static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env)
+{
+ uint32_t hflags = 0;
+
+#if defined(TARGET_PPC64)
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
+ hflags |= 1 << HFLAGS_PMCC0;
+ }
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
+ hflags |= 1 << HFLAGS_PMCC1;
+ }
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) {
+ hflags |= 1 << HFLAGS_PMCJCE;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (env->pmc_ins_cnt) {
+ hflags |= 1 << HFLAGS_INSN_CNT;
+ }
+ if (env->pmc_ins_cnt & 0x1e) {
+ hflags |= 1 << HFLAGS_PMC_OTHER;
+ }
+#endif
+#endif
+
+ return hflags;
+}
+
+/* Mask of all PMU hflags */
+static uint32_t hreg_compute_pmu_hflags_mask(CPUPPCState *env)
+{
+ uint32_t hflags_mask = 0;
+#if defined(TARGET_PPC64)
+ hflags_mask |= 1 << HFLAGS_PMCC0;
+ hflags_mask |= 1 << HFLAGS_PMCC1;
+ hflags_mask |= 1 << HFLAGS_PMCJCE;
+ hflags_mask |= 1 << HFLAGS_INSN_CNT;
+ hflags_mask |= 1 << HFLAGS_PMC_OTHER;
+#endif
+ return hflags_mask;
+}
+
static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
{
target_ulong msr = env->msr;
@@ -104,30 +146,12 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
if (env->spr[SPR_LPCR] & LPCR_HR) {
hflags |= 1 << HFLAGS_HR;
}
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
- hflags |= 1 << HFLAGS_PMCC0;
- }
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
- hflags |= 1 << HFLAGS_PMCC1;
- }
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) {
- hflags |= 1 << HFLAGS_PMCJCE;
- }
#ifndef CONFIG_USER_ONLY
if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) {
hflags |= 1 << HFLAGS_HV;
}
-#if defined(TARGET_PPC64)
- if (env->pmc_ins_cnt) {
- hflags |= 1 << HFLAGS_INSN_CNT;
- }
- if (env->pmc_ins_cnt & 0x1e) {
- hflags |= 1 << HFLAGS_PMC_OTHER;
- }
-#endif
-
/*
* This is our encoding for server processors. The architecture
* specifies that there is no such thing as userspace with
@@ -172,6 +196,8 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
hflags |= dmmu_idx << HFLAGS_DMMU_IDX;
#endif
+ hflags |= hreg_compute_pmu_hflags_value(env);
+
return hflags | (msr & msr_mask);
}
@@ -180,6 +206,17 @@ void hreg_compute_hflags(CPUPPCState *env)
env->hflags = hreg_compute_hflags_value(env);
}
+/*
+ * This can be used as a lighter-weight alternative to hreg_compute_hflags
+ * when PMU MMCR0 or pmc_ins_cnt changes. pmc_ins_cnt is changed by
+ * pmu_update_summaries.
+ */
+void hreg_update_pmu_hflags(CPUPPCState *env)
+{
+ env->hflags &= ~hreg_compute_pmu_hflags_mask(env);
+ env->hflags |= hreg_compute_pmu_hflags_value(env);
+}
+
#ifdef CONFIG_DEBUG_TCG
void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
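
Aside, not part of the patch: the point of hreg_update_pmu_hflags() is the clear-then-or pattern, shown here on a toy flags word. The bit positions below are invented for the illustration; only the pattern mirrors the code above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit positions, standing in for HFLAGS_PMCC0 and friends. */
enum { FLAG_A = 3, FLAG_B = 5, FLAG_C = 9 };

static uint32_t group_mask(void)
{
    return (1u << FLAG_A) | (1u << FLAG_B) | (1u << FLAG_C);
}

static uint32_t group_value(int a, int b, int c)
{
    uint32_t v = 0;
    if (a) v |= 1u << FLAG_A;
    if (b) v |= 1u << FLAG_B;
    if (c) v |= 1u << FLAG_C;
    return v;
}

int main(void)
{
    uint32_t hflags = 0xF0F0;   /* whatever the last full recompute produced */

    /* Lighter-weight update: clear only this group's bits, then OR in the
     * freshly computed values, leaving every other hflag untouched. */
    hflags &= ~group_mask();
    hflags |= group_value(1, 0, 1);

    printf("hflags = 0x%x\n", hflags);
    return 0;
}
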
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
index 42f26870b9..8196c1346d 100644
--- a/target/ppc/helper_regs.h
+++ b/target/ppc/helper_regs.h
@@ -22,6 +22,7 @@
void hreg_swap_gpr_tgpr(CPUPPCState *env);
void hreg_compute_hflags(CPUPPCState *env);
+void hreg_update_pmu_hflags(CPUPPCState *env);
void cpu_interrupt_exittb(CPUState *cs);
int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv);
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index be6eb3d968..134b16c625 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -21,10 +21,6 @@ static void post_load_update_msr(CPUPPCState *env)
*/
env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
ppc_store_msr(env, msr);
-
- if (tcg_enabled()) {
- pmu_update_summaries(env);
- }
}
static int get_avr(QEMUFile *f, void *pv, size_t size,
@@ -317,6 +313,10 @@ static int cpu_post_load(void *opaque, int version_id)
post_load_update_msr(env);
+ if (tcg_enabled()) {
+ pmu_mmcr01_updated(env);
+ }
+
return 0;
}
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index 7235a4befe..ae1db6e348 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -489,16 +489,15 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
}
/* Generic TLB check function for embedded PowerPC implementations */
-int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
- hwaddr *raddrp,
- target_ulong address, uint32_t pid, int ext,
- int i)
+static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddrp,
+ target_ulong address, uint32_t pid, int i)
{
target_ulong mask;
/* Check valid flag */
if (!(tlb->prot & PAGE_VALID)) {
- return -1;
+ return false;
}
mask = ~(tlb->size - 1);
qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
@@ -507,19 +506,30 @@ int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
mask, (uint32_t)tlb->PID, tlb->prot);
/* Check PID */
if (tlb->PID != 0 && tlb->PID != pid) {
- return -1;
+ return false;
}
/* Check effective address */
if ((address & mask) != tlb->EPN) {
- return -1;
+ return false;
}
*raddrp = (tlb->RPN & mask) | (address & ~mask);
- if (ext) {
- /* Extend the physical address to 36 bits */
- *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
- }
+ return true;
+}
- return 0;
+/* Generic TLB search function for PowerPC embedded implementations */
+int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i;
+
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) {
+ return i;
+ }
+ }
+ return -1;
}
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
@@ -535,8 +545,8 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
pr = FIELD_EX64(env->msr, MSR, PR);
for (i = 0; i < env->nb_tlb; i++) {
tlb = &env->tlb.tlbe[i];
- if (ppcemb_tlb_check(env, tlb, &raddr, address,
- env->spr[SPR_40x_PID], 0, i) < 0) {
+ if (!ppcemb_tlb_check(env, tlb, &raddr, address,
+ env->spr[SPR_40x_PID], i)) {
continue;
}
zsel = (tlb->attr >> 4) & 0xF;
@@ -591,34 +601,39 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
return ret;
}
-static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
- hwaddr *raddr, int *prot, target_ulong address,
- MMUAccessType access_type, int i)
+static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddr, target_ulong addr, int i)
{
- int prot2;
-
- if (ppcemb_tlb_check(env, tlb, raddr, address,
- env->spr[SPR_BOOKE_PID],
- !env->nb_pids, i) >= 0) {
- goto found_tlb;
+ if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) {
+ if (!env->nb_pids) {
+ /* Extend the physical address to 36 bits */
+ *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32;
+ }
+ return true;
+ } else if (!env->nb_pids) {
+ return false;
}
-
if (env->spr[SPR_BOOKE_PID1] &&
- ppcemb_tlb_check(env, tlb, raddr, address,
- env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
- goto found_tlb;
+ ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) {
+ return true;
}
-
if (env->spr[SPR_BOOKE_PID2] &&
- ppcemb_tlb_check(env, tlb, raddr, address,
- env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
- goto found_tlb;
+ ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) {
+ return true;
}
+ return false;
+}
- qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
- return -1;
+static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddr, int *prot, target_ulong address,
+ MMUAccessType access_type, int i)
+{
+ int prot2;
-found_tlb:
+ if (!mmubooke_check_pid(env, tlb, raddr, address, i)) {
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
+ return -1;
+ }
if (FIELD_EX64(env->msr, MSR, PR)) {
prot2 = tlb->prot & 0xF;
@@ -677,8 +692,7 @@ static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
return ret;
}
-hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
- ppcmas_tlb_t *tlb)
+hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
int tlbm_size;
@@ -688,9 +702,8 @@ hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
}
/* TLB check function for MAS based SoftTLBs */
-int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
- hwaddr *raddrp, target_ulong address,
- uint32_t pid)
+int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp,
+ target_ulong address, uint32_t pid)
{
hwaddr mask;
uint32_t tlb_pid;
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 64e30435f5..d3ea7588f9 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -112,27 +112,6 @@ static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
env->last_way = way;
}
-/* Generic TLB search function for PowerPC embedded implementations */
-static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
- uint32_t pid)
-{
- ppcemb_tlb_t *tlb;
- hwaddr raddr;
- int i, ret;
-
- /* Default return value is no match */
- ret = -1;
- for (i = 0; i < env->nb_tlb; i++) {
- tlb = &env->tlb.tlbe[i];
- if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
- ret = i;
- break;
- }
- }
-
- return ret;
-}
-
/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
@@ -168,15 +147,6 @@ static void booke206_flush_tlb(CPUPPCState *env, int flags,
tlb_flush(env_cpu(env));
}
-static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
- target_ulong eaddr, MMUAccessType access_type,
- int type)
-{
- return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
-}
-
-
-
/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
@@ -643,7 +613,7 @@ target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
*/
nb_BATs = env->nb_BATs;
env->nb_BATs = 0;
- if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
+ if (get_physical_address_wtlb(env, &ctx, addr, 0, ACCESS_INT, 0) == 0) {
ret = ctx.raddr;
}
env->nb_BATs = nb_BATs;
diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c
index 64a64865d7..7bb4bf81f7 100644
--- a/target/ppc/power8-pmu.c
+++ b/target/ppc/power8-pmu.c
@@ -31,7 +31,11 @@ static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}
-void pmu_update_summaries(CPUPPCState *env)
+/*
+ * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
+ * hflags must subsequently be updated.
+ */
+static void pmu_update_summaries(CPUPPCState *env)
{
target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
@@ -39,7 +43,7 @@ void pmu_update_summaries(CPUPPCState *env)
int cyc_cnt = 0;
if (mmcr0 & MMCR0_FC) {
- goto hflags_calc;
+ goto out;
}
if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
@@ -73,10 +77,28 @@ void pmu_update_summaries(CPUPPCState *env)
ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;
- hflags_calc:
+ out:
env->pmc_ins_cnt = ins_cnt;
env->pmc_cyc_cnt = cyc_cnt;
- env->hflags = deposit32(env->hflags, HFLAGS_INSN_CNT, 1, ins_cnt != 0);
+}
+
+void pmu_mmcr01_updated(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ pmu_update_summaries(env);
+ hreg_update_pmu_hflags(env);
+
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAO) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
+ } else {
+ ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 0);
+ }
+
+ /*
+ * Should this update overflow timers (if mmcr0 is updated) so they
+ * get set in cpu_post_load?
+ */
}
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
@@ -234,18 +256,11 @@ static void pmu_delete_timers(CPUPPCState *env)
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
- bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0;
- bool hflags_pmcc1 = (value & MMCR0_PMCC1) != 0;
-
pmu_update_cycles(env);
env->spr[SPR_POWER_MMCR0] = value;
- /* MMCR0 writes can change HFLAGS_PMCC[01] and HFLAGS_INSN_CNT */
- env->hflags = deposit32(env->hflags, HFLAGS_PMCC0, 1, hflags_pmcc0);
- env->hflags = deposit32(env->hflags, HFLAGS_PMCC1, 1, hflags_pmcc1);
-
- pmu_update_summaries(env);
+ pmu_mmcr01_updated(env);
/* Update cycle overflow timers with the current MMCR0 state */
pmu_update_overflow_timers(env);
@@ -257,8 +272,7 @@ void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
env->spr[SPR_POWER_MMCR1] = value;
- /* MMCR1 writes can change HFLAGS_INSN_CNT */
- pmu_update_summaries(env);
+ pmu_mmcr01_updated(env);
}
target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
@@ -277,18 +291,17 @@ void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
pmc_update_overflow_timer(env, sprn);
}
-static void fire_PMC_interrupt(PowerPCCPU *cpu)
+static void perfm_alert(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
pmu_update_cycles(env);
if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
- env->spr[SPR_POWER_MMCR0] &= ~MMCR0_FCECE;
env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;
- /* Changing MMCR0_FC requires a new HFLAGS_INSN_CNT calc */
- pmu_update_summaries(env);
+ /* Changing MMCR0_FC requires summaries and hflags update */
+ pmu_mmcr01_updated(env);
/*
* Delete all pending timers if we need to freeze
@@ -299,8 +312,10 @@ static void fire_PMC_interrupt(PowerPCCPU *cpu)
}
if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
+ /* These MMCR0 bits do not require summaries or hflags update. */
env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
+ ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
}
raise_ebb_perfm_exception(env);
@@ -309,20 +324,17 @@ static void fire_PMC_interrupt(PowerPCCPU *cpu)
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
- fire_PMC_interrupt(env_archcpu(env));
+ perfm_alert(env_archcpu(env));
}
/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
bool overflow_triggered;
- PowerPCCPU *cpu;
overflow_triggered = pmu_increment_insns(env, num_insns);
-
if (overflow_triggered) {
- cpu = env_archcpu(env);
- fire_PMC_interrupt(cpu);
+ perfm_alert(env_archcpu(env));
}
}
@@ -330,7 +342,7 @@ static void cpu_ppc_pmu_timer_cb(void *opaque)
{
PowerPCCPU *cpu = opaque;
- fire_PMC_interrupt(cpu);
+ perfm_alert(cpu);
}
void cpu_ppc_pmu_init(CPUPPCState *env)
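
Aside, not part of the patch: pmu_mmcr01_updated() bundles three steps that previously happened piecemeal. A toy model with invented bit positions and stand-in fields, only to make the ordering (summaries, then hflags, then the PMAO-driven PERFM line) explicit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented bit positions; not the architected MMCR0 layout. */
#define MMCR0_FC   (1u << 0)   /* freeze counters */
#define MMCR0_PMAO (1u << 1)   /* performance monitor alert occurred */

struct pmu {
    uint32_t mmcr0;
    unsigned ins_cnt;    /* summary: which PMCs currently count instructions */
    unsigned hflags;     /* PMU-related translation flags */
    bool     perfm_irq;  /* state of the PERFM interrupt line */
};

/* Everything that must happen after MMCR0 (or MMCR1) changes, in order. */
static void mmcr01_updated(struct pmu *p)
{
    p->ins_cnt   = (p->mmcr0 & MMCR0_FC) ? 0 : 0x3f;     /* 1. summaries  */
    p->hflags    = p->ins_cnt ? 1 : 0;                   /* 2. hflags     */
    p->perfm_irq = (p->mmcr0 & MMCR0_PMAO) != 0;         /* 3. PERFM line */
}

int main(void)
{
    struct pmu p = { .mmcr0 = MMCR0_PMAO };

    mmcr01_updated(&p);
    printf("ins_cnt=%#x hflags=%u perfm=%d\n", p.ins_cnt, p.hflags, p.perfm_irq);

    p.mmcr0 = MMCR0_FC;    /* freezing the counters drops the alert line too */
    mmcr01_updated(&p);
    printf("ins_cnt=%#x hflags=%u perfm=%d\n", p.ins_cnt, p.hflags, p.perfm_irq);
    return 0;
}
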
diff --git a/target/ppc/power8-pmu.h b/target/ppc/power8-pmu.h
index c0093e2219..775e640053 100644
--- a/target/ppc/power8-pmu.h
+++ b/target/ppc/power8-pmu.h
@@ -18,10 +18,10 @@
#define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL
void cpu_ppc_pmu_init(CPUPPCState *env);
-void pmu_update_summaries(CPUPPCState *env);
+void pmu_mmcr01_updated(CPUPPCState *env);
#else
static inline void cpu_ppc_pmu_init(CPUPPCState *env) { }
-static inline void pmu_update_summaries(CPUPPCState *env) { }
+static inline void pmu_mmcr01_updated(CPUPPCState *env) { }
#endif
#endif
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 37fd431870..b591f2e496 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -75,6 +75,7 @@ static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
+static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
static TCGv cpu_reserve_val2;
static TCGv cpu_fpscr;
@@ -143,6 +144,10 @@ void ppc_translate_init(void)
cpu_reserve = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, reserve_addr),
"reserve_addr");
+ cpu_reserve_length = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState,
+ reserve_length),
+ "reserve_length");
cpu_reserve_val = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, reserve_val),
"reserve_val");
@@ -3469,8 +3474,8 @@ static void gen_load_locked(DisasContext *ctx, MemOp memop)
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
tcg_gen_mov_tl(cpu_reserve_val, gpr);
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
#define LARX(name, memop) \
@@ -3692,35 +3697,32 @@ static void gen_stdat(DisasContext *ctx)
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- int reg = rS(ctx->opcode);
+ TCGLabel *lfail;
+ TCGv EA;
+ TCGv cr0;
+ TCGv t0;
+ int rs = rS(ctx->opcode);
+
+ lfail = gen_new_label();
+ EA = tcg_temp_new();
+ cr0 = tcg_temp_new();
+ t0 = tcg_temp_new();
+ tcg_gen_mov_tl(cr0, cpu_so);
gen_set_access_type(ctx, ACCESS_RES);
- gen_addr_reg_index(ctx, t0);
- tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);
- t0 = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
- cpu_gpr[reg], ctx->mem_idx,
+ cpu_gpr[rs], ctx->mem_idx,
DEF_MEMOP(memop) | MO_ALIGN);
tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
- tcg_gen_or_tl(t0, t0, cpu_so);
- tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
- tcg_gen_br(l2);
-
- gen_set_label(l1);
-
- /*
- * Address mismatch implies failure. But we still need to provide
- * the memory barrier semantics of the instruction.
- */
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_or_tl(cr0, cr0, t0);
- gen_set_label(l2);
+ gen_set_label(lfail);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
tcg_gen_movi_tl(cpu_reserve, -1);
}
@@ -3765,6 +3767,8 @@ static void gen_lqarx(DisasContext *ctx)
tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
tcg_gen_extr_i128_i64(lo, hi, t16);
+ tcg_gen_mov_tl(cpu_reserve, EA);
+ tcg_gen_movi_tl(cpu_reserve_length, 16);
tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
@@ -3772,24 +3776,26 @@ static void gen_lqarx(DisasContext *ctx)
/* stqcx. */
static void gen_stqcx_(DisasContext *ctx)
{
- TCGLabel *lab_fail, *lab_over;
- int rs = rS(ctx->opcode);
+ TCGLabel *lfail;
TCGv EA, t0, t1;
+ TCGv cr0;
TCGv_i128 cmp, val;
+ int rs = rS(ctx->opcode);
if (unlikely(rs & 1)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
- lab_fail = gen_new_label();
- lab_over = gen_new_label();
+ lfail = gen_new_label();
+ EA = tcg_temp_new();
+ cr0 = tcg_temp_new();
+ tcg_gen_mov_tl(cr0, cpu_so);
gen_set_access_type(ctx, ACCESS_RES);
- EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
-
- tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);
cmp = tcg_temp_new_i128();
val = tcg_temp_new_i128();
@@ -3812,20 +3818,10 @@ static void gen_stqcx_(DisasContext *ctx)
tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
- tcg_gen_or_tl(t0, t0, cpu_so);
- tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
-
- tcg_gen_br(lab_over);
- gen_set_label(lab_fail);
-
- /*
- * Address mismatch implies failure. But we still need to provide
- * the memory barrier semantics of the instruction.
- */
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_or_tl(cr0, cr0, t0);
- gen_set_label(lab_over);
+ gen_set_label(lfail);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif /* defined(TARGET_PPC64) */
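
Aside, not part of the patch: the effect of the new cpu_reserve_length can be modeled in plain C. The sketch below tracks an (address, length) reservation and lets the store-conditional succeed only when both match, which is what the added brcondi on cpu_reserve_length does at translation time (the real code additionally compares the reserved value via an atomic cmpxchg).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct reservation {
    uint64_t addr;     /* UINT64_MAX means "no reservation held" */
    unsigned length;   /* size in bytes of the larx that established it */
};

/* lbarx/lharx/lwarx/ldarx/lqarx: record address and access size. */
static void load_and_reserve(struct reservation *r, uint64_t ea, unsigned size)
{
    r->addr = ea;
    r->length = size;
}

/* stcx.: CR0.EQ is set only if both the address and the size match. */
static bool store_conditional(struct reservation *r, uint64_t ea, unsigned size)
{
    bool ok = (r->addr == ea) && (r->length == size);

    r->addr = UINT64_MAX;   /* the reservation is consumed either way */
    return ok;
}

int main(void)
{
    struct reservation r = { .addr = UINT64_MAX };

    load_and_reserve(&r, 0x1000, 4);                    /* lwarx            */
    printf("%d\n", store_conditional(&r, 0x1000, 8));   /* stdcx. fails: 0  */

    load_and_reserve(&r, 0x1000, 8);                    /* ldarx            */
    printf("%d\n", store_conditional(&r, 0x1000, 8));   /* stdcx. ok:    1  */
    return 0;
}
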
diff --git a/tests/avocado/tuxrun_baselines.py b/tests/avocado/tuxrun_baselines.py
index 3a46e7a745..e12250eabb 100644
--- a/tests/avocado/tuxrun_baselines.py
+++ b/tests/avocado/tuxrun_baselines.py
@@ -184,6 +184,7 @@ class TuxRunBaselineTest(QemuSystemTest):
def ppc64_common_tuxrun(self, sums, prefix):
# add device args to command line.
+ self.require_netdev('user')
self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
'-device', 'virtio-net,netdev=vnet')
self.vm.add_args('-netdev', '{"type":"user","id":"hostnet0"}',