Diffstat (limited to 'target/ppc/power8-pmu.c')
-rw-r--r-- target/ppc/power8-pmu.c | 238
1 file changed, 104 insertions(+), 134 deletions(-)
diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c
index 08d1902cd5..236e8e66e9 100644
--- a/target/ppc/power8-pmu.c
+++ b/target/ppc/power8-pmu.c
@@ -11,8 +11,6 @@
*/
#include "qemu/osdep.h"
-
-#include "power8-pmu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
@@ -20,24 +18,12 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/ppc/ppc.h"
+#include "power8-pmu.h"
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
#define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL
-static bool pmc_is_inactive(CPUPPCState *env, int sprn)
-{
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_FC) {
- return true;
- }
-
- if (sprn < SPR_POWER_PMC5) {
- return env->spr[SPR_POWER_MMCR0] & MMCR0_FC14;
- }
-
- return env->spr[SPR_POWER_MMCR0] & MMCR0_FC56;
-}
-
static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
if (sprn == SPR_POWER_PMC1) {
@@ -47,135 +33,115 @@ static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}
-/*
- * For PMCs 1-4, IBM POWER chips has support for an implementation
- * dependent event, 0x1E, that enables cycle counting. The Linux kernel
- * makes extensive use of 0x1E, so let's also support it.
- *
- * Likewise, event 0x2 is an implementation-dependent event that IBM
- * POWER chips implement (at least since POWER8) that is equivalent to
- * PM_INST_CMPL. Let's support this event on PMCs 1-4 as well.
- */
-static PMUEventType pmc_get_event(CPUPPCState *env, int sprn)
+void pmu_update_summaries(CPUPPCState *env)
{
- uint8_t mmcr1_evt_extr[] = { MMCR1_PMC1EVT_EXTR, MMCR1_PMC2EVT_EXTR,
- MMCR1_PMC3EVT_EXTR, MMCR1_PMC4EVT_EXTR };
- PMUEventType evt_type = PMU_EVENT_INVALID;
- uint8_t pmcsel;
- int i;
-
- if (pmc_is_inactive(env, sprn)) {
- return PMU_EVENT_INACTIVE;
- }
-
- if (sprn == SPR_POWER_PMC5) {
- return PMU_EVENT_INSTRUCTIONS;
- }
+ target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
+ target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
+ int ins_cnt = 0;
+ int cyc_cnt = 0;
- if (sprn == SPR_POWER_PMC6) {
- return PMU_EVENT_CYCLES;
+ if (mmcr0 & MMCR0_FC) {
+ goto hflags_calc;
}
- i = sprn - SPR_POWER_PMC1;
- pmcsel = extract64(env->spr[SPR_POWER_MMCR1], mmcr1_evt_extr[i],
- MMCR1_EVT_SIZE);
-
- switch (pmcsel) {
- case 0x2:
- evt_type = PMU_EVENT_INSTRUCTIONS;
- break;
- case 0x1E:
- evt_type = PMU_EVENT_CYCLES;
- break;
- case 0xF0:
- /*
- * PMC1SEL = 0xF0 is the architected PowerISA v3.1
- * event that counts cycles using PMC1.
- */
- if (sprn == SPR_POWER_PMC1) {
- evt_type = PMU_EVENT_CYCLES;
- }
- break;
- case 0xFA:
- /*
- * PMC4SEL = 0xFA is the "instructions completed
- * with run latch set" event.
- */
- if (sprn == SPR_POWER_PMC4) {
- evt_type = PMU_EVENT_INSN_RUN_LATCH;
- }
- break;
- case 0xFE:
- /*
- * PMC1SEL = 0xFE is the architected PowerISA v3.1
- * event to sample instructions using PMC1.
- */
- if (sprn == SPR_POWER_PMC1) {
- evt_type = PMU_EVENT_INSTRUCTIONS;
+ if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
+ target_ulong sel;
+
+ sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
+ switch (sel) {
+ case 0x02:
+ case 0xfe:
+ ins_cnt |= 1 << 1;
+ break;
+ case 0x1e:
+ case 0xf0:
+ cyc_cnt |= 1 << 1;
+ break;
}
- break;
- default:
- break;
- }
- return evt_type;
-}
+ sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= (sel == 0x02) << 2;
+ cyc_cnt |= (sel == 0x1e) << 2;
-bool pmu_insn_cnt_enabled(CPUPPCState *env)
-{
- int sprn;
+ sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= (sel == 0x02) << 3;
+ cyc_cnt |= (sel == 0x1e) << 3;
- for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC5; sprn++) {
- if (pmc_get_event(env, sprn) == PMU_EVENT_INSTRUCTIONS ||
- pmc_get_event(env, sprn) == PMU_EVENT_INSN_RUN_LATCH) {
- return true;
- }
+ sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
+ cyc_cnt |= (sel == 0x1e) << 4;
}
- return false;
+ ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
+ cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;
+
+ hflags_calc:
+ env->pmc_ins_cnt = ins_cnt;
+ env->pmc_cyc_cnt = cyc_cnt;
+ env->hflags = deposit32(env->hflags, HFLAGS_INSN_CNT, 1, ins_cnt != 0);
}
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
+ target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
+ unsigned ins_cnt = env->pmc_ins_cnt;
bool overflow_triggered = false;
- int sprn;
-
- /* PMC6 never counts instructions */
- for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC5; sprn++) {
- PMUEventType evt_type = pmc_get_event(env, sprn);
- bool insn_event = evt_type == PMU_EVENT_INSTRUCTIONS ||
- evt_type == PMU_EVENT_INSN_RUN_LATCH;
-
- if (pmc_is_inactive(env, sprn) || !insn_event) {
- continue;
+ target_ulong tmp;
+
+ if (unlikely(ins_cnt & 0x1e)) {
+ if (ins_cnt & (1 << 1)) {
+ tmp = env->spr[SPR_POWER_PMC1];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC1] = tmp;
}
- if (evt_type == PMU_EVENT_INSTRUCTIONS) {
- env->spr[sprn] += num_insns;
+ if (ins_cnt & (1 << 2)) {
+ tmp = env->spr[SPR_POWER_PMC2];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC2] = tmp;
}
- if (evt_type == PMU_EVENT_INSN_RUN_LATCH &&
- env->spr[SPR_CTRL] & CTRL_RUN) {
- env->spr[sprn] += num_insns;
+ if (ins_cnt & (1 << 3)) {
+ tmp = env->spr[SPR_POWER_PMC3];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC3] = tmp;
}
- if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL &&
- pmc_has_overflow_enabled(env, sprn)) {
+ if (ins_cnt & (1 << 4)) {
+ target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
+ int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
+ if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
+ tmp = env->spr[SPR_POWER_PMC4];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC4] = tmp;
+ }
+ }
+ }
+ if (ins_cnt & (1 << 5)) {
+ tmp = env->spr[SPR_POWER_PMC5];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
overflow_triggered = true;
-
- /*
- * The real PMU will always trigger a counter overflow with
- * PMC_COUNTER_NEGATIVE_VAL. We don't have an easy way to
- * do that since we're counting block of instructions at
- * the end of each translation block, and we're probably
- * passing this value at this point.
- *
- * Let's write PMC_COUNTER_NEGATIVE_VAL to the overflowed
- * counter to simulate what the real hardware would do.
- */
- env->spr[sprn] = PMC_COUNTER_NEGATIVE_VAL;
}
+ env->spr[SPR_POWER_PMC5] = tmp;
}
return overflow_triggered;
@@ -185,18 +151,16 @@ static void pmu_update_cycles(CPUPPCState *env)
{
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
uint64_t time_delta = now - env->pmu_base_time;
- int sprn;
+ int sprn, cyc_cnt = env->pmc_cyc_cnt;
for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
- if (pmc_get_event(env, sprn) != PMU_EVENT_CYCLES) {
- continue;
+ if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
+ /*
+ * The pseries and powernv clock runs at 1Ghz, meaning
+ * that 1 nanosec equals 1 cycle.
+ */
+ env->spr[sprn] += time_delta;
}
-
- /*
- * The pseries and powernv clock runs at 1Ghz, meaning
- * that 1 nanosec equals 1 cycle.
- */
- env->spr[sprn] += time_delta;
}
/* Update base_time for future calculations */
@@ -225,7 +189,7 @@ static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
return;
}
- if (pmc_get_event(env, sprn) != PMU_EVENT_CYCLES ||
+ if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
!pmc_has_overflow_enabled(env, sprn)) {
/* Overflow timer is not needed for this counter */
timer_del(pmc_overflow_timer);
@@ -233,7 +197,7 @@ static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
}
if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
- timeout =  0;
+ timeout = 0;
} else {
timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
}
@@ -260,12 +224,18 @@ static void pmu_update_overflow_timers(CPUPPCState *env)
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
+ bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0;
+ bool hflags_pmcc1 = (value & MMCR0_PMCC1) != 0;
+
pmu_update_cycles(env);
env->spr[SPR_POWER_MMCR0] = value;
- /* MMCR0 writes can change HFLAGS_PMCCCLEAR and HFLAGS_INSN_CNT */
- hreg_compute_hflags(env);
+ /* MMCR0 writes can change HFLAGS_PMCC[01] and HFLAGS_INSN_CNT */
+ env->hflags = deposit32(env->hflags, HFLAGS_PMCC0, 1, hflags_pmcc0);
+ env->hflags = deposit32(env->hflags, HFLAGS_PMCC1, 1, hflags_pmcc1);
+
+ pmu_update_summaries(env);
/* Update cycle overflow timers with the current MMCR0 state */
pmu_update_overflow_timers(env);
@@ -278,7 +248,7 @@ void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
env->spr[SPR_POWER_MMCR1] = value;
/* MMCR1 writes can change HFLAGS_INSN_CNT */
- hreg_compute_hflags(env);
+ pmu_update_summaries(env);
}
target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
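For reference, a minimal standalone sketch (not part of the patch) of the bit-numbering convention used by the new pmc_ins_cnt and pmc_cyc_cnt summaries: bit N set means PMCN is counting, with bit 0 left unused, which is why the loops above test (1 << (sprn - SPR_POWER_PMC1 + 1)). The PMC names below are hypothetical stand-ins, not the real SPR numbers.

/*
 * Illustrative sketch only, assuming the bit-per-PMC convention described
 * above; this is not code from the patch or from QEMU.
 */
#include <stdio.h>

enum { PMC1 = 1, PMC2, PMC3, PMC4, PMC5, PMC6 };  /* stand-ins, bit 0 unused */

int main(void)
{
    /* Example: PMC1 and PMC5 count instructions, PMC6 counts cycles. */
    unsigned ins_cnt = (1u << PMC1) | (1u << PMC5);
    unsigned cyc_cnt = 1u << PMC6;

    for (int pmc = PMC1; pmc <= PMC6; pmc++) {
        printf("PMC%d: insns=%d cycles=%d\n", pmc,
               !!(ins_cnt & (1u << pmc)), !!(cyc_cnt & (1u << pmc)));
    }
    return 0;
}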