[Qemu-devel] [mttcg] cputlb: Use async tlb_flush_by_mmuidx


From: Alvise Rigo
Subject: [Qemu-devel] [mttcg] cputlb: Use async tlb_flush_by_mmuidx
Date: Mon, 29 Feb 2016 14:16:03 +0100

As in the case of tlb_flush(), tlb_flush_by_mmuidx() also has to queue the
TLB flush if it targets another VCPU. To accomplish this, a new async work
item has been added, together with a new struct TLBFlushByMMUIdxParams. A
bitmap is used to track which MMU indexes to flush.

This patch applies to the multi_tcg_v8 branch.
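
As a side note for readers new to this pattern, below is a minimal,
self-contained sketch of the idea in plain C (not QEMU code: fake_cpu,
flush_params and flush_work are invented stand-ins for CPUState,
TLBFlushByMMUIdxParams and tlb_flush_by_mmuidx_async_work): the requester
records the MMU indexes to flush in a bitmap, and the invalidation itself
is deferred to a work item that the target VCPU runs on itself.

#include <stdbool.h>
#include <stdio.h>

#define NB_MMU_MODES 8

struct fake_cpu {
    bool table_valid[NB_MMU_MODES];   /* stand-in for tlb_table[] validity */
};

struct flush_params {                 /* stand-in for TLBFlushByMMUIdxParams */
    struct fake_cpu *cpu;
    unsigned long idxmap;             /* bit n set => flush MMU index n */
};

/* The deferred work item: runs on the target VCPU and invalidates exactly
 * the MMU indexes marked in the bitmap. */
static void flush_work(struct flush_params *p)
{
    int mmu_idx;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (p->idxmap & (1UL << mmu_idx)) {
            p->cpu->table_valid[mmu_idx] = false;
        }
    }
}

int main(void)
{
    struct fake_cpu cpu;
    struct flush_params p = { &cpu, 0 };
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        cpu.table_valid[i] = true;
    }

    /* Requester side: mark the indexes in the bitmap... */
    p.idxmap |= 1UL << 0;
    p.idxmap |= 1UL << 2;

    /* ...then hand the request over; a direct call stands in here for
     * async_run_on_cpu() running this on the target VCPU. */
    flush_work(&p);

    for (i = 0; i < NB_MMU_MODES; i++) {
        printf("mmu_idx %d: %s\n", i,
               cpu.table_valid[i] ? "valid" : "flushed");
    }
    return 0;
}

The bitmap makes the deferred request self-describing, so the requesting
thread never touches the remote TLB directly.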

Signed-off-by: Alvise Rigo <address@hidden>
---
 cputlb.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 53 insertions(+), 12 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 29252d1..1eeeccb 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -103,9 +103,11 @@ void tlb_flush(CPUState *cpu, int flush_global)
     }
 }
 
-static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+/* Flush tlb_table[] and tlb_v_table[] of @cpu at MMU indexes given by @bitmap.
+ * Also flush tb_jmp_cache. */
+static inline void tlb_tables_flush_bitmap(CPUState *cpu, unsigned long *bitmap)
 {
-    CPUArchState *env = cpu->env_ptr;
+    int mmu_idx;
 
 #if defined(DEBUG_TLB)
     printf("tlb_flush_by_mmuidx:");
@@ -114,6 +116,41 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
        links while we are modifying them */
     cpu->current_tb = NULL;
 
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        if (test_bit(mmu_idx, bitmap)) {
+            CPUArchState *env = cpu->env_ptr;
+#if defined(DEBUG_TLB)
+            printf(" %d", mmu_idx);
+#endif
+            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
+            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+        }
+    }
+    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+
+#if defined(DEBUG_TLB)
+    printf("\n");
+#endif
+}
+
+struct TLBFlushByMMUIdxParams {
+    CPUState *cpu;
+    DECLARE_BITMAP(idx_to_flush, NB_MMU_MODES);
+};
+
+static void tlb_flush_by_mmuidx_async_work(void *opaque)
+{
+    struct TLBFlushByMMUIdxParams *params = opaque;
+
+    tlb_tables_flush_bitmap(params->cpu, params->idx_to_flush);
+
+    g_free(params);
+}
+
+static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+{
+    DECLARE_BITMAP(idxmap, NB_MMU_MODES) = { 0 };
+
     for (;;) {
         int mmu_idx = va_arg(argp, int);
 
@@ -121,19 +158,23 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
             break;
         }
 
-#if defined(DEBUG_TLB)
-        printf(" %d", mmu_idx);
-#endif
-
-        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
-        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+        set_bit(mmu_idx, idxmap);
     }
 
-#if defined(DEBUG_TLB)
-    printf("\n");
-#endif
+    if (!qemu_cpu_is_self(cpu)) {
+        /* We do not set the pending_tlb_flush bit, only a global flush
+         * does that. */
+        if (!atomic_read(&cpu->pending_tlb_flush)) {
+            struct TLBFlushByMMUIdxParams *params;
 
-    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+            params = g_malloc(sizeof(struct TLBFlushByMMUIdxParams));
+            params->cpu = cpu;
+            memcpy(params->idx_to_flush, idxmap, sizeof(idxmap));
+            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, params);
+        }
+    } else {
+        tlb_tables_flush_bitmap(cpu, idxmap);
+    }
 }
 
 void tlb_flush_by_mmuidx(CPUState *cpu, ...)
-- 
2.7.2
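
For context, a call site of this varargs API looks like the following (a
sketch with illustrative index values; the va_arg loop shown above stops at
the first negative argument, so the list is terminated with -1):

    /* Flush MMU indexes 0 and 2 of @other_cpu; with this patch, if
     * other_cpu is not the calling VCPU, the flush is queued as async
     * work instead of being performed synchronously here. */
    tlb_flush_by_mmuidx(other_cpu, 0, 2, -1);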