Re: [PATCH v7 11/17] target/ppc: Don't store VRMA SLBE persistently
From: Philippe Mathieu-Daudé
Subject: Re: [PATCH v7 11/17] target/ppc: Don't store VRMA SLBE persistently
Date: Thu, 5 Mar 2020 10:17:03 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Thunderbird/68.4.1
Hi David,
On 3/3/20 4:43 AM, David Gibson wrote:
Currently, we construct the SLBE used for VRMA translations when the LPCR
is written (which controls some bits in the SLBE), then use it later for
translations.
This is a bit complex and confusing; simplify it by constructing the SLBE
directly from the LPCR when we need it.
Signed-off-by: David Gibson <address@hidden>
---
target/ppc/cpu.h | 3 --
target/ppc/mmu-hash64.c | 92 ++++++++++++++++-------------------------
2 files changed, 35 insertions(+), 60 deletions(-)
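The shape of the change, as a minimal sketch with made-up types and a
placeholder derivation (not the actual QEMU code):

#include <stdint.h>

typedef struct { uint64_t esid, vsid; } slbe_t;
typedef struct { uint64_t lpcr; slbe_t vrma_slb; } cpu_t;

/* Before: derive the VRMA SLBE whenever LPCR is written and cache it
 * in the CPU state, which then has to be kept in sync. */
static void store_lpcr_before(cpu_t *cpu, uint64_t val)
{
    cpu->lpcr = val;
    cpu->vrma_slb.vsid = val >> 4;  /* placeholder derivation */
    cpu->vrma_slb.esid = 1;
}

/* After: derive the SLBE on demand at translation time, straight from
 * the current LPCR value, so there is no cached copy to invalidate. */
static int build_vrma_slbe_after(const cpu_t *cpu, slbe_t *slb)
{
    slb->vsid = cpu->lpcr >> 4;     /* placeholder derivation */
    slb->esid = 1;
    return 0;                       /* 0 = valid VRMA setup */
}

The win is that there is no cached SLBE that can go stale between LPCR
writes and translations.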
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index f9871b1233..5a55fb02bd 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1044,9 +1044,6 @@ struct CPUPPCState {
uint32_t flags;
uint64_t insns_flags;
uint64_t insns_flags2;
-#if defined(TARGET_PPC64)
- ppc_slb_t vrma_slb;
-#endif
I find it confusing to move this member to the stack while the other SLB
entries stay in the CPUState context on the heap.
Consider amending the following (I can also send a patch later if you
prefer):
-- >8 --
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 5a55fb02bd..ade71c3592 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -988,6 +988,7 @@ struct CPUPPCState {
/* MMU context, only relevant for full system emulation */
#if defined(TARGET_PPC64)
ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */
+ ppc_slb_t vrma_slbe;
#endif
target_ulong sr[32]; /* segment registers */
uint32_t nb_BATs; /* number of BATs */
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 34f6009b1e..abbdd531c6 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -818,7 +818,6 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- ppc_slb_t vrma_slbe;
ppc_slb_t *slb;
unsigned apshift;
hwaddr ptex;
@@ -857,7 +856,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
}
} else if (ppc_hash64_use_vrma(env)) {
/* Emulated VRMA mode */
- slb = &vrma_slbe;
+ slb = &env->vrma_slbe;
if (build_vrma_slbe(cpu, slb) != 0) {
/* Invalid VRMA setup, machine check */
cs->exception_index = POWERPC_EXCP_MCHECK;
@@ -1006,7 +1005,6 @@ skip_slb_search:
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
- ppc_slb_t vrma_slbe;
ppc_slb_t *slb;
hwaddr ptex, raddr;
ppc_hash_pte64_t pte;
@@ -1028,7 +1026,7 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
return raddr | env->spr[SPR_HRMOR];
} else if (ppc_hash64_use_vrma(env)) {
/* Emulated VRMA mode */
- slb = &vrma_slbe;
+ slb = &env->vrma_slbe;
if (build_vrma_slbe(cpu, slb) != 0) {
return -1;
}
---
With the hunk amended, I dare to give:
Reviewed-by: Philippe Mathieu-Daudé <address@hidden>
int error_code;
uint32_t pending_interrupts;
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 4fd7b7ee74..34f6009b1e 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -784,11 +784,41 @@ static target_ulong rmls_limit(PowerPCCPU *cpu)
return rma_sizes[rmls];
}
+static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong lpcr = env->spr[SPR_LPCR];
+ uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+ target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
+ int i;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
+ slb->esid = SLB_ESID_V;
+ slb->vsid = vsid;
+ slb->sps = sps;
+ return 0;
+ }
+ }
+
+ error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
+ TARGET_FMT_lx"\n", lpcr);
+
+ return -1;
+}
+
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
int rwx, int mmu_idx)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ ppc_slb_t vrma_slbe;
ppc_slb_t *slb;
unsigned apshift;
hwaddr ptex;
@@ -827,8 +857,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
}
} else if (ppc_hash64_use_vrma(env)) {
/* Emulated VRMA mode */
- slb = &env->vrma_slb;
- if (!slb->sps) {
+ slb = &vrma_slbe;
+ if (build_vrma_slbe(cpu, slb) != 0) {
/* Invalid VRMA setup, machine check */
cs->exception_index = POWERPC_EXCP_MCHECK;
env->error_code = 0;
@@ -976,6 +1006,7 @@ skip_slb_search:
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
+ ppc_slb_t vrma_slbe;
ppc_slb_t *slb;
hwaddr ptex, raddr;
ppc_hash_pte64_t pte;
@@ -997,8 +1028,8 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
return raddr | env->spr[SPR_HRMOR];
} else if (ppc_hash64_use_vrma(env)) {
/* Emulated VRMA mode */
- slb = &env->vrma_slb;
- if (!slb->sps) {
+ slb = &vrma_slbe;
+ if (build_vrma_slbe(cpu, slb) != 0) {
return -1;
}
} else {
@@ -1037,65 +1068,12 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}
-static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
-{
- CPUPPCState *env = &cpu->env;
- const PPCHash64SegmentPageSizes *sps = NULL;
- target_ulong esid, vsid, lpcr;
- ppc_slb_t *slb = &env->vrma_slb;
- uint32_t vrmasd;
- int i;
-
- /* First clear it */
- slb->esid = slb->vsid = 0;
- slb->sps = NULL;
-
- /* Is VRMA enabled ? */
- if (!ppc_hash64_use_vrma(env)) {
- return;
- }
-
- /*
- * Make one up. Mostly ignore the ESID which will not be needed
- * for translation
- */
- lpcr = env->spr[SPR_LPCR];
- vsid = SLB_VSID_VRMA;
- vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
- vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
- esid = SLB_ESID_V;
-
- for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
- const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
-
- if (!sps1->page_shift) {
- break;
- }
-
- if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
- sps = sps1;
- break;
- }
- }
-
- if (!sps) {
- error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
- " vsid 0x"TARGET_FMT_lx, esid, vsid);
- return;
- }
-
- slb->vsid = vsid;
- slb->esid = esid;
- slb->sps = sps;
-}
-
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
- ppc_hash64_update_vrma(cpu);
}
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
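For what it's worth, my reading of what build_vrma_slbe() does with
LPCR[VRMASD], as a standalone demo. The DEMO_* constants are simplified
stand-ins, not the definitions from target/ppc/cpu.h:

#include <stdint.h>
#include <stdio.h>

/* Stand-in constants for illustration only; the real masks live in
 * target/ppc/cpu.h and may differ. */
#define DEMO_VRMASD_SHIFT  47
#define DEMO_VRMASD_MASK   (0x1fULL << DEMO_VRMASD_SHIFT)
#define DEMO_VSID_VRMA     (1ULL << 28)
#define DEMO_VSID_L        0x100ULL              /* large-page bit */
#define DEMO_VSID_LP       0x030ULL              /* page-size selector */
#define DEMO_LLP_MASK      (DEMO_VSID_L | DEMO_VSID_LP)

int main(void)
{
    /* Pretend the hypervisor set VRMASD = 0b10000, i.e. L=1, LP=00. */
    uint64_t lpcr = 0x10ULL << DEMO_VRMASD_SHIFT;

    /* Same plumbing as build_vrma_slbe(): pull VRMASD out of LPCR and
     * drop its L||LP bits into the VSID's page-size field... */
    uint32_t vrmasd = (uint32_t)((lpcr & DEMO_VRMASD_MASK) >> DEMO_VRMASD_SHIFT);
    uint64_t vsid = DEMO_VSID_VRMA | (((uint64_t)vrmasd << 4) & DEMO_LLP_MASK);

    /* ...then the L||LP encoding is matched against the per-CPU page
     * size table (sps->slb_enc); no match means an invalid VRMA setup,
     * which the fault path turns into a machine check. */
    printf("vrmasd=0x%x llp-bits=0x%llx\n",
           (unsigned)vrmasd, (unsigned long long)(vsid & DEMO_LLP_MASK));
    return 0;
}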