qemu-devel


From: David Gibson
Subject: Re: [RFC PATCH 4/4] target/ppc: Moved helpers to mmu_helper.c
Date: Mon, 7 Jun 2021 12:34:44 +1000

On Wed, Jun 02, 2021 at 04:26:04PM -0300, Lucas Mateus Castro (alqotel) wrote:
> Moved helpers from target/ppc/mmu-hash64.c to target/ppc/mmu_helper.c
> and removed the #ifdef CONFIG_TCG guards and the #include of
> exec/helper-proto.h from mmu-hash64.c.
> 
> Signed-off-by: Lucas Mateus Castro (alqotel) <lucas.araujo@eldorado.org.br>

I'd prefer not to do this.  These helpers used to be in
mmu_helper.c, along with most of the stuff in mmu-*.[ch].  I think the
division by MMU model is more important than the TCG/!TCG distinction,
so I'd prefer to keep these here, even if it means ifdefs.  Eventually
we could consider splitting each of the individual MMU files into
TCG/!TCG parts, but I don't want to go back to having all the helpers
for umpteen very different MMU models all lumped into one giant file.
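
Concretely, what I have in mind is roughly the status quo -- just a
sketch, not a tested patch -- keeping the TCG-only wrappers next to
the hash64 code, with the guard local to that file:

    /* mmu-hash64.c: TCG-only wrappers stay with the hash64 MMU code */
    #ifdef CONFIG_TCG
    #include "exec/helper-proto.h"

    void helper_store_lpcr(CPUPPCState *env, target_ulong val)
    {
        PowerPCCPU *cpu = env_archcpu(env);

        ppc_store_lpcr(cpu, val);
    }
    /* ... helper_slbia(), helper_slbie(), helper_store_slb() and
     * friends stay here as well ... */
    #endif /* CONFIG_TCG */

That keeps everything about the hash64 MMU in one place, at the cost
of a few #ifdef lines.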

> ---
> I had to turn slb_lookup into a non-static function, as it is called
> both from the code that was moved to mmu_helper.c and from the code
> that wasn't moved.
> 
> Also, perhaps it would be better to create a new file for the mmu-hash
> functions that are not compiled in a !TCG build; personally I thought
> that moving the helpers from mmu-hash64.c to mmu_helper.c was the
> better choice.
> ---
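
On the slb_lookup() change: if the wrappers stay in mmu-hash64.c as
suggested above, slb_lookup() can remain static and the prototype
added to mmu-hash64.h below wouldn't be needed.  A minimal sketch,
assuming its callers stay in that file:

    /* mmu-hash64.c: forward declaration, definition unchanged */
    static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr);

    #ifdef CONFIG_TCG
    /* helper_slbie(), helper_find_slb_vsid(), ... call it from here,
     * so the symbol never needs to be exported */
    #endif
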
>  target/ppc/mmu-hash64.c | 219 +---------------------------------------
>  target/ppc/mmu-hash64.h |   1 +
>  target/ppc/mmu_helper.c | 209 ++++++++++++++++++++++++++++++++++++++
>  3 files changed, 211 insertions(+), 218 deletions(-)
> 
> diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> index 708dffc31b..d2ded71107 100644
> --- a/target/ppc/mmu-hash64.c
> +++ b/target/ppc/mmu-hash64.c
> @@ -32,10 +32,6 @@
>  #include "mmu-book3s-v3.h"
>  #include "helper_regs.h"
>  
> -#ifdef CONFIG_TCG
> -#include "exec/helper-proto.h"
> -#endif
> -
>  /* #define DEBUG_SLB */
>  
>  #ifdef DEBUG_SLB
> @@ -48,7 +44,7 @@
>   * SLB handling
>   */
>  
> -static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
> +ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
>  {
>      CPUPPCState *env = &cpu->env;
>      uint64_t esid_256M, esid_1T;
> @@ -100,114 +96,6 @@ void dump_slb(PowerPCCPU *cpu)
>      }
>  }
>  
> -#ifdef CONFIG_TCG
> -void helper_slbia(CPUPPCState *env, uint32_t ih)
> -{
> -    PowerPCCPU *cpu = env_archcpu(env);
> -    int starting_entry;
> -    int n;
> -
> -    /*
> -     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
> -     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
> -     * can overwrite a valid SLB without flushing its lookaside information.
> -     *
> -     * It would be possible to keep the TLB in synch with the SLB by flushing
> -     * when a valid entry is overwritten by slbmte, and therefore slbia would
> -     * not have to flush unless it evicts a valid SLB entry. However it is
> -     * expected that slbmte is more common than slbia, and slbia is usually
> -     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
> -     * good one.
> -     *
> -     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
> -     * the same SLB entries (everything but entry 0), but differ in what
> -     * "lookaside information" is invalidated. TCG can ignore this and flush
> -     * everything.
> -     *
> -     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
> -     * invalidated.
> -     */
> -
> -    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
> -
> -    starting_entry = 1; /* default for IH=0,1,2,6 */
> -
> -    if (env->mmu_model == POWERPC_MMU_3_00) {
> -        switch (ih) {
> -        case 0x7:
> -            /* invalidate no SLBs, but all lookaside information */
> -            return;
> -
> -        case 0x3:
> -        case 0x4:
> -            /* also considers SLB entry 0 */
> -            starting_entry = 0;
> -            break;
> -
> -        case 0x5:
> -            /* treat undefined values as ih==0, and warn */
> -            qemu_log_mask(LOG_GUEST_ERROR,
> -                          "slbia undefined IH field %u.\n", ih);
> -            break;
> -
> -        default:
> -            /* 0,1,2,6 */
> -            break;
> -        }
> -    }
> -
> -    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
> -        ppc_slb_t *slb = &env->slb[n];
> -
> -        if (!(slb->esid & SLB_ESID_V)) {
> -            continue;
> -        }
> -        if (env->mmu_model == POWERPC_MMU_3_00) {
> -            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
> -                /* preserves entries with a class value of 0 */
> -                continue;
> -            }
> -        }
> -
> -        slb->esid &= ~SLB_ESID_V;
> -    }
> -}
> -
> -static void __helper_slbie(CPUPPCState *env, target_ulong addr,
> -                           target_ulong global)
> -{
> -    PowerPCCPU *cpu = env_archcpu(env);
> -    ppc_slb_t *slb;
> -
> -    slb = slb_lookup(cpu, addr);
> -    if (!slb) {
> -        return;
> -    }
> -
> -    if (slb->esid & SLB_ESID_V) {
> -        slb->esid &= ~SLB_ESID_V;
> -
> -        /*
> -         * XXX: given the fact that segment size is 256 MB or 1TB,
> -         *      and we still don't have a tlb_flush_mask(env, n, mask)
> -         *      in QEMU, we just invalidate all TLBs
> -         */
> -        env->tlb_need_flush |=
> -            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
> -    }
> -}
> -
> -void helper_slbie(CPUPPCState *env, target_ulong addr)
> -{
> -    __helper_slbie(env, addr, false);
> -}
> -
> -void helper_slbieg(CPUPPCState *env, target_ulong addr)
> -{
> -    __helper_slbie(env, addr, true);
> -}
> -#endif
> -
>  int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
>                    target_ulong esid, target_ulong vsid)
>  {
> @@ -260,102 +148,6 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
>      return 0;
>  }
>  
> -#ifdef CONFIG_TCG
> -static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
> -                             target_ulong *rt)
> -{
> -    CPUPPCState *env = &cpu->env;
> -    int slot = rb & 0xfff;
> -    ppc_slb_t *slb = &env->slb[slot];
> -
> -    if (slot >= cpu->hash64_opts->slb_size) {
> -        return -1;
> -    }
> -
> -    *rt = slb->esid;
> -    return 0;
> -}
> -
> -static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
> -                             target_ulong *rt)
> -{
> -    CPUPPCState *env = &cpu->env;
> -    int slot = rb & 0xfff;
> -    ppc_slb_t *slb = &env->slb[slot];
> -
> -    if (slot >= cpu->hash64_opts->slb_size) {
> -        return -1;
> -    }
> -
> -    *rt = slb->vsid;
> -    return 0;
> -}
> -
> -static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
> -                             target_ulong *rt)
> -{
> -    CPUPPCState *env = &cpu->env;
> -    ppc_slb_t *slb;
> -
> -    if (!msr_is_64bit(env, env->msr)) {
> -        rb &= 0xffffffff;
> -    }
> -    slb = slb_lookup(cpu, rb);
> -    if (slb == NULL) {
> -        *rt = (target_ulong)-1ul;
> -    } else {
> -        *rt = slb->vsid;
> -    }
> -    return 0;
> -}
> -
> -void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
> -{
> -    PowerPCCPU *cpu = env_archcpu(env);
> -
> -    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
> -        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> -                               POWERPC_EXCP_INVAL, GETPC());
> -    }
> -}
> -
> -target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
> -{
> -    PowerPCCPU *cpu = env_archcpu(env);
> -    target_ulong rt = 0;
> -
> -    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
> -        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> -                               POWERPC_EXCP_INVAL, GETPC());
> -    }
> -    return rt;
> -}
> -
> -target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
> -{
> -    PowerPCCPU *cpu = env_archcpu(env);
> -    target_ulong rt = 0;
> -
> -    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
> -        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> -                               POWERPC_EXCP_INVAL, GETPC());
> -    }
> -    return rt;
> -}
> -
> -target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
> -{
> -    PowerPCCPU *cpu = env_archcpu(env);
> -    target_ulong rt = 0;
> -
> -    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
> -        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> -                               POWERPC_EXCP_INVAL, GETPC());
> -    }
> -    return rt;
> -}
> -#endif
> -
>  /* Check No-Execute or Guarded Storage */
>  static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
>                                                ppc_hash_pte64_t pte)
> @@ -1146,15 +938,6 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
>      cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
>  }
>  
> -#ifdef CONFIG_TCG
> -void helper_store_lpcr(CPUPPCState *env, target_ulong val)
> -{
> -    PowerPCCPU *cpu = env_archcpu(env);
> -
> -    ppc_store_lpcr(cpu, val);
> -}
> -#endif
> -
>  void ppc_hash64_init(PowerPCCPU *cpu)
>  {
>      CPUPPCState *env = &cpu->env;
> diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
> index 4b8b8e7950..44fd7c9d35 100644
> --- a/target/ppc/mmu-hash64.h
> +++ b/target/ppc/mmu-hash64.h
> @@ -7,6 +7,7 @@
>  void dump_slb(PowerPCCPU *cpu);
>  int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
>                    target_ulong esid, target_ulong vsid);
> +ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr);
>  hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
>  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
>                                  int mmu_idx);
> diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
> index dbf7b398cd..6db2678a89 100644
> --- a/target/ppc/mmu_helper.c
> +++ b/target/ppc/mmu_helper.c
> @@ -1361,3 +1361,211 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
>  
>  /*****************************************************************************/
>  
> +#if defined(TARGET_PPC64)
> +void helper_slbia(CPUPPCState *env, uint32_t ih)
> +{
> +    PowerPCCPU *cpu = env_archcpu(env);
> +    int starting_entry;
> +    int n;
> +
> +    /*
> +     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
> +     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
> +     * can overwrite a valid SLB without flushing its lookaside information.
> +     *
> +     * It would be possible to keep the TLB in synch with the SLB by flushing
> +     * when a valid entry is overwritten by slbmte, and therefore slbia would
> +     * not have to flush unless it evicts a valid SLB entry. However it is
> +     * expected that slbmte is more common than slbia, and slbia is usually
> +     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
> +     * good one.
> +     *
> +     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
> +     * the same SLB entries (everything but entry 0), but differ in what
> +     * "lookaside information" is invalidated. TCG can ignore this and flush
> +     * everything.
> +     *
> +     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
> +     * invalidated.
> +     */
> +
> +    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
> +
> +    starting_entry = 1; /* default for IH=0,1,2,6 */
> +
> +    if (env->mmu_model == POWERPC_MMU_3_00) {
> +        switch (ih) {
> +        case 0x7:
> +            /* invalidate no SLBs, but all lookaside information */
> +            return;
> +
> +        case 0x3:
> +        case 0x4:
> +            /* also considers SLB entry 0 */
> +            starting_entry = 0;
> +            break;
> +
> +        case 0x5:
> +            /* treat undefined values as ih==0, and warn */
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                          "slbia undefined IH field %u.\n", ih);
> +            break;
> +
> +        default:
> +            /* 0,1,2,6 */
> +            break;
> +        }
> +    }
> +
> +    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
> +        ppc_slb_t *slb = &env->slb[n];
> +
> +        if (!(slb->esid & SLB_ESID_V)) {
> +            continue;
> +        }
> +        if (env->mmu_model == POWERPC_MMU_3_00) {
> +            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
> +                /* preserves entries with a class value of 0 */
> +                continue;
> +            }
> +        }
> +
> +        slb->esid &= ~SLB_ESID_V;
> +    }
> +}
> +
> +static void __helper_slbie(CPUPPCState *env, target_ulong addr,
> +                           target_ulong global)
> +{
> +    PowerPCCPU *cpu = env_archcpu(env);
> +    ppc_slb_t *slb;
> +
> +    slb = slb_lookup(cpu, addr);
> +    if (!slb) {
> +        return;
> +    }
> +
> +    if (slb->esid & SLB_ESID_V) {
> +        slb->esid &= ~SLB_ESID_V;
> +
> +        /*
> +         * XXX: given the fact that segment size is 256 MB or 1TB,
> +         *      and we still don't have a tlb_flush_mask(env, n, mask)
> +         *      in QEMU, we just invalidate all TLBs
> +         */
> +        env->tlb_need_flush |=
> +            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
> +    }
> +}
> +
> +void helper_slbie(CPUPPCState *env, target_ulong addr)
> +{
> +    __helper_slbie(env, addr, false);
> +}
> +
> +void helper_slbieg(CPUPPCState *env, target_ulong addr)
> +{
> +    __helper_slbie(env, addr, true);
> +}
> +
> +static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
> +                             target_ulong *rt)
> +{
> +    CPUPPCState *env = &cpu->env;
> +    int slot = rb & 0xfff;
> +    ppc_slb_t *slb = &env->slb[slot];
> +
> +    if (slot >= cpu->hash64_opts->slb_size) {
> +        return -1;
> +    }
> +
> +    *rt = slb->esid;
> +    return 0;
> +}
> +
> +static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
> +                             target_ulong *rt)
> +{
> +    CPUPPCState *env = &cpu->env;
> +    int slot = rb & 0xfff;
> +    ppc_slb_t *slb = &env->slb[slot];
> +
> +    if (slot >= cpu->hash64_opts->slb_size) {
> +        return -1;
> +    }
> +
> +    *rt = slb->vsid;
> +    return 0;
> +}
> +
> +static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
> +                             target_ulong *rt)
> +{
> +    CPUPPCState *env = &cpu->env;
> +    ppc_slb_t *slb;
> +
> +    if (!msr_is_64bit(env, env->msr)) {
> +        rb &= 0xffffffff;
> +    }
> +    slb = slb_lookup(cpu, rb);
> +    if (slb == NULL) {
> +        *rt = (target_ulong)-1ul;
> +    } else {
> +        *rt = slb->vsid;
> +    }
> +    return 0;
> +}
> +
> +void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
> +{
> +    PowerPCCPU *cpu = env_archcpu(env);
> +
> +    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
> +        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> +                               POWERPC_EXCP_INVAL, GETPC());
> +    }
> +}
> +
> +target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
> +{
> +    PowerPCCPU *cpu = env_archcpu(env);
> +    target_ulong rt = 0;
> +
> +    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
> +        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> +                               POWERPC_EXCP_INVAL, GETPC());
> +    }
> +    return rt;
> +}
> +
> +target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
> +{
> +    PowerPCCPU *cpu = env_archcpu(env);
> +    target_ulong rt = 0;
> +
> +    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
> +        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> +                               POWERPC_EXCP_INVAL, GETPC());
> +    }
> +    return rt;
> +}
> +
> +target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
> +{
> +    PowerPCCPU *cpu = env_archcpu(env);
> +    target_ulong rt = 0;
> +
> +    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
> +        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
> +                               POWERPC_EXCP_INVAL, GETPC());
> +    }
> +    return rt;
> +}
> +
> +void helper_store_lpcr(CPUPPCState *env, target_ulong val)
> +{
> +    PowerPCCPU *cpu = env_archcpu(env);
> +
> +    ppc_store_lpcr(cpu, val);
> +}
> +#endif
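
If we do eventually go for a per-MMU TCG/!TCG split, what I'd picture
is something like a hypothetical target/ppc/mmu-hash64-tcg.c holding
just the helper_* wrappers -- a rough sketch, not a plan:

    /* mmu-hash64-tcg.c (hypothetical): TCG-only hash64 helpers */
    #include "qemu/osdep.h"
    #include "cpu.h"
    #include "exec/helper-proto.h"
    #include "mmu-hash64.h"

    void helper_slbie(CPUPPCState *env, target_ulong addr)
    {
        PowerPCCPU *cpu = env_archcpu(env);
        ppc_slb_t *slb = slb_lookup(cpu, addr);

        if (slb && (slb->esid & SLB_ESID_V)) {
            slb->esid &= ~SLB_ESID_V;
            /* no tlb_flush_mask() yet, so flush everything locally */
            env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        }
    }

    /* helper_slbia(), helper_store_slb(), helper_store_lpcr(), ...
     * would move here too -- that is the point where exporting
     * slb_lookup() as this patch does becomes necessary. */

That's a follow-up discussion though, not a reason to fold everything
into mmu_helper.c now.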

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson

