
From: Philippe Mathieu-Daudé
Subject: Re: [Qemu-devel] [PATCH 05/10] cputlb: Move env->vtlb_index to env->tlb_d.vindex
Date: Tue, 23 Oct 2018 13:07:24 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.2.1

On 23/10/18 9:02, Richard Henderson wrote:
The rest of the tlb victim cache is per-tlb;
the next use index should be as well.

Signed-off-by: Richard Henderson <address@hidden>

Reviewed-by: Philippe Mathieu-Daudé <address@hidden>

---
  include/exec/cpu-defs.h | 5 +++--
  accel/tcg/cputlb.c      | 5 ++---
  2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index df8ae18d9d..181c0dbfa4 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -150,6 +150,8 @@ typedef struct CPUTLBDesc {
       */
      target_ulong large_page_addr;
      target_ulong large_page_mask;
+    /* The next index to use in the tlb victim table.  */
+    size_t vindex;
  } CPUTLBDesc;

 /*
@@ -178,8 +180,7 @@ typedef struct CPUTLBCommon {
      CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
      CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
      CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                 \
-    size_t tlb_flush_count;                                             \
-    target_ulong vtlb_index;                                            \
+    size_t tlb_flush_count;
 #else

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 72b0567f70..d3b37ffa85 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -119,6 +119,7 @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
      memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
      env->tlb_d[mmu_idx].large_page_addr = -1;
      env->tlb_d[mmu_idx].large_page_mask = -1;
+    env->tlb_d[mmu_idx].vindex = 0;
  }

 /* This is OK because CPU architectures generally permit an
@@ -149,8 +150,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
      qemu_spin_unlock(&env->tlb_c.lock);

     cpu_tb_jmp_cache_clear(cpu);
-
-    env->vtlb_index = 0;
  }

 static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
@@ -668,7 +667,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
       * different page; otherwise just overwrite the stale data.
       */
      if (!tlb_hit_page_anyprot(te, vaddr_page)) {
-        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
          CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

         /* Evict the old entry into the victim tlb. */
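
For anyone skimming the change itself: below is a minimal standalone
sketch of the round-robin victim-TLB indexing this patch makes per-tlb.
The CPUTLBDesc/CPUArchState definitions here are deliberately simplified
stand-ins, and pick_victim_slot() is a hypothetical helper for
illustration only, not the real QEMU code.

#include <stddef.h>
#include <stdio.h>

#define CPU_VTLB_SIZE 8
#define NB_MMU_MODES  2

/* Simplified stand-in: only the field relevant to victim indexing. */
typedef struct CPUTLBDesc {
    size_t vindex;    /* next slot to use in the victim table */
} CPUTLBDesc;

/* Simplified stand-in for the per-CPU state; one descriptor per
 * MMU index, mirroring env->tlb_d[mmu_idx] after this patch. */
typedef struct CPUArchState {
    CPUTLBDesc tlb_d[NB_MMU_MODES];
} CPUArchState;

/* Round-robin choice of a victim slot, per MMU index rather than
 * through a single CPU-wide counter as before the patch. */
static unsigned pick_victim_slot(CPUArchState *env, int mmu_idx)
{
    return env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
}

int main(void)
{
    CPUArchState env = { 0 };

    /* Each MMU index cycles through its own victim slots, so
     * evictions in one mode no longer advance the other's index. */
    for (int i = 0; i < 3; i++) {
        printf("mmu_idx 0 -> slot %u, mmu_idx 1 -> slot %u\n",
               pick_victim_slot(&env, 0), pick_victim_slot(&env, 1));
    }
    return 0;
}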



