From: Alvise Rigo
Subject: [Qemu-devel] [RFC v8 04/14] softmmu: Simplify helper_*_st_name, wrap RAM code
Date: Tue, 19 Apr 2016 15:39:21 +0200

Attempting to simplify the helper_*_st_name helpers, wrap the code that
handles a RAM access into an inline function. The function covers both the
BE and LE cases and is expanded twice, once in each helper (TODO: check
this last statement).

Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
CC: Alex Bennée <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
 softmmu_template.h | 80 +++++++++++++++++++++++++++---------------------------
 1 file changed, 40 insertions(+), 40 deletions(-)
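[Editor's note, not part of the patch: the refactoring below simply collapses
two near-duplicate endian-specific store paths into one inline function that
takes a little_endian flag, so each helper passes a constant and the compiler
folds away the dead branch. A minimal standalone sketch of that pattern in
plain C follows; it is not QEMU code, and the store32 name and the byte loop
are illustrative assumptions only.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One inline store routine covering both endiannesses, in the same
     * spirit as do_ram_store() below: the caller picks LE or BE via a
     * boolean instead of duplicating the store code. */
    static inline void store32(uint8_t *p, uint32_t val, bool little_endian)
    {
        for (int i = 0; i < 4; i++) {
            int shift = little_endian ? 8 * i : 8 * (3 - i);
            p[i] = (uint8_t)(val >> shift);
        }
    }

    int main(void)
    {
        uint8_t buf[4];

        store32(buf, 0x11223344u, true);   /* LE caller: 44 33 22 11 */
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);

        store32(buf, 0x11223344u, false);  /* BE caller: 11 22 33 44 */
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }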

diff --git a/softmmu_template.h b/softmmu_template.h
index 9185486..ea6a0fb 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -433,13 +433,48 @@ static inline void smmu_helper(do_mmio_store)(CPUArchState *env,
     glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
 }
 
+static inline void smmu_helper(do_ram_store)(CPUArchState *env,
+                                             bool little_endian, DATA_TYPE val,
+                                             target_ulong addr, TCGMemOpIdx oi,
+                                             unsigned mmu_idx, int index,
+                                             uintptr_t retaddr)
+{
+    uintptr_t haddr;
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                     >= TARGET_PAGE_SIZE)) {
+        smmu_helper(do_unl_store)(env, little_endian, val, addr, oi, mmu_idx,
+                                  retaddr);
+        return;
+    }
+
+    /* Handle aligned access or unaligned access in the same page.  */
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+#else
+    if (little_endian) {
+        glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
+    } else {
+        glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+    }
+#endif
+}
+
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    uintptr_t haddr;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -465,27 +500,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    /* Handle slow unaligned access (it spans two pages or IO).  */
-    if (DATA_SIZE > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
-                     >= TARGET_PAGE_SIZE)) {
-        smmu_helper(do_unl_store)(env, true, val, addr, oi, mmu_idx, retaddr);
-        return;
-    }
-
-    /* Handle aligned access or unaligned access in the same page.  */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-
-    haddr = addr + env->tlb_table[mmu_idx][index].addend;
-#if DATA_SIZE == 1
-    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-#else
-    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-#endif
+    smmu_helper(do_ram_store)(env, true, val, addr, oi, mmu_idx, index,
+                              retaddr);
 }
 
 #if DATA_SIZE > 1
@@ -495,7 +511,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    uintptr_t haddr;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -521,23 +536,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    /* Handle slow unaligned access (it spans two pages or IO).  */
-    if (DATA_SIZE > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
-                     >= TARGET_PAGE_SIZE)) {
-        smmu_helper(do_unl_store)(env, false, val, addr, oi, mmu_idx, retaddr);
-        return;
-    }
-
-    /* Handle aligned access or unaligned access in the same page.  */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-
-    haddr = addr + env->tlb_table[mmu_idx][index].addend;
-    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+    smmu_helper(do_ram_store)(env, false, val, addr, oi, mmu_idx, index,
+                              retaddr);
 }
 #endif /* DATA_SIZE > 1 */
 
-- 
2.8.0