On Mon, Aug 09, 2021 at 10:10:50AM -0300, Daniel Henrique Barboza wrote:
This patch starts the counter negative EBB support by enabling PMC1
counter negative condition.
A counter negative condition happens when a performance monitor counter
reaches the value 0x80000000. When that happens, if a counter negative
condition is enabled in that counter, a performance monitor alert is
triggered. For PMC1, this condition is enabled by MMCR0_PMC1CE.
An icount-based logic is used to predict when we need to wake up the timer
to trigger the alert in both PM_INST_CMPL (0x2) and PM_CYC (0x1E) events.
The timer callback will then trigger a PPC_INTERRUPT_PMC which will become an
event-based exception later.
Some EBB powerpc kernel selftests are passing after this patch, but a
substantial amount of them relies on other PMCs to be enabled and events
that we don't support at this moment. We'll address that in the next
patches.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---
target/ppc/cpu.h | 1 +
target/ppc/pmu_book3s_helper.c | 127 +++++++++++++++++++++++----------
2 files changed, 92 insertions(+), 36 deletions(-)
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 1d38b8cf7a..5c81d459f4 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -350,6 +350,7 @@ typedef struct ppc_v3_pate_t {
#define MMCR0_EBE PPC_BIT(43) /* Perf Monitor EBB Enable */
#define MMCR0_FCECE PPC_BIT(38) /* FC on Enabled Cond or Event */
#define MMCR0_PMCC PPC_BITMASK(44, 45) /* PMC Control */
+#define MMCR0_PMC1CE PPC_BIT(48)
#define MMCR1_PMC1SEL_SHIFT (63 - 39)
#define MMCR1_PMC1SEL PPC_BITMASK(32, 39)
diff --git a/target/ppc/pmu_book3s_helper.c b/target/ppc/pmu_book3s_helper.c
index 43cc0eb722..58ae65e22b 100644
--- a/target/ppc/pmu_book3s_helper.c
+++ b/target/ppc/pmu_book3s_helper.c
@@ -25,6 +25,7 @@
* and SPAPR code.
*/
#define PPC_CPU_FREQ 1000000000
+#define COUNTER_NEGATIVE_VAL 0x80000000
static uint64_t get_cycles(uint64_t icount_delta)
{
@@ -32,22 +33,9 @@ static uint64_t get_cycles(uint64_t icount_delta)
NANOSECONDS_PER_SECOND);
}
-static void update_PMC_PM_INST_CMPL(CPUPPCState *env, int sprn,
- uint64_t icount_delta)
-{
- env->spr[sprn] += icount_delta;
-}
-
-static void update_PMC_PM_CYC(CPUPPCState *env, int sprn,
- uint64_t icount_delta)
-{
- env->spr[sprn] += get_cycles(icount_delta);
-}
-
-static void update_programmable_PMC_reg(CPUPPCState *env, int sprn,
- uint64_t icount_delta)
+static uint8_t get_PMC_event(CPUPPCState *env, int sprn)
{
- int event;
+ int event = 0x0;
switch (sprn) {
case SPR_POWER_PMC1:
@@ -65,11 +53,35 @@ static void update_programmable_PMC_reg(CPUPPCState *env,
int sprn,
case SPR_POWER_PMC4:
event = MMCR1_PMC4SEL & env->spr[SPR_POWER_MMCR1];
break;
+ case SPR_POWER_PMC5:
+ event = 0x2;
+ break;
+ case SPR_POWER_PMC6:
+ event = 0x1E;
+ break;
This looks like a nice cleanup that would be better folded into an
earlier patch.
default:
- return;
+ break;
}
- switch (event) {
+ return event;
+}
+
+static void update_PMC_PM_INST_CMPL(CPUPPCState *env, int sprn,
+ uint64_t icount_delta)
+{
+ env->spr[sprn] += icount_delta;
+}
+
+static void update_PMC_PM_CYC(CPUPPCState *env, int sprn,
+ uint64_t icount_delta)
+{
+ env->spr[sprn] += get_cycles(icount_delta);
+}
+
+static void update_programmable_PMC_reg(CPUPPCState *env, int sprn,
+ uint64_t icount_delta)
+{
+ switch (get_PMC_event(env, sprn)) {
case 0x2:
update_PMC_PM_INST_CMPL(env, sprn, icount_delta);
break;
@@ -99,30 +111,57 @@ static void update_PMCs(CPUPPCState *env, uint64_t
icount_delta)
update_PMC_PM_CYC(env, SPR_POWER_PMC6, icount_delta);
}
+static void set_PMU_excp_timer(CPUPPCState *env)
+{
+ uint64_t timeout, now, remaining_val;
+
+ if (!(env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE)) {
+ return;
+ }
+
+ remaining_val = COUNTER_NEGATIVE_VAL - env->spr[SPR_POWER_PMC1];
+
+ switch (get_PMC_event(env, SPR_POWER_PMC1)) {
+ case 0x2:
+ timeout = icount_to_ns(remaining_val);
+ break;
+ case 0x1e:
+ timeout = muldiv64(remaining_val, NANOSECONDS_PER_SECOND,
+ PPC_CPU_FREQ);
So.. this appears to be simulating to the guest that cycles are
occurring at a constant rate, consistent with the advertised CPU
frequency. Which sounds right, except... it's not clear to me that
you're using the same logic to generate the values you read from the
cycles PMC (in that case it shouldn't need to reference icount at all,
right?).