From: Gavin Shan
Subject: [PATCH 1/5] target/arm/tcg: Indirect addressing for coprocessor register storage
Date: Mon, 11 Apr 2022 14:58:38 +0800

Currently, there is an array used as the storage for the coprocessor
registers, and each element in the array occupies 8 bytes. This builds
in the assumption that the size of a coprocessor register can't exceed
8 bytes. The same storage mechanism is also used by KVM. Unfortunately,
that assumption conflicts with KVM's pseudo firmware registers, whose
sizes are variable and can exceed 8 bytes. So the storage scheme
doesn't work for KVM's pseudo firmware registers.

This introduces another array (@cpreg_value_indexes) to track the
storage location in @cpreg_values for each coprocessor register.
@cpreg_value_array_len is also added to track the total storage size
of all coprocessor registers. The storage is then addressed
indirectly as @cpreg_values[cpreg_value_indexes[i]].

For the TCG case, each coprocessor register still has a fixed 8 bytes
of storage, so the old direct addressing scheme and the new indirect
addressing scheme co-exist and remain interchangeable, even across
migration.
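
As a rough illustration (not part of this patch), here is a standalone
sketch of how the indirect addressing is intended to be used. The
DemoCPU struct and demo_read_cpreg() helper are invented for the
example; only the field names mirror the ones added by this patch.

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-in for the relevant ArchCPU fields. */
  typedef struct {
      uint64_t *cpreg_indexes;        /* 64-bit KVM-style register IDs  */
      uint32_t *cpreg_value_indexes;  /* start slot of each reg's value */
      uint64_t *cpreg_values;         /* packed value storage           */
      int32_t cpreg_array_len;        /* number of registers            */
      int32_t cpreg_value_array_len;  /* number of uint64_t value slots */
  } DemoCPU;

  /* Read the first 64-bit chunk of register i via the indirection. */
  static uint64_t demo_read_cpreg(DemoCPU *cpu, int i)
  {
      return cpu->cpreg_values[cpu->cpreg_value_indexes[i]];
  }

  int main(void)
  {
      /*
       * Register 0 is a plain 8-byte register; register 1 stands in
       * for a hypothetical 16-byte pseudo firmware register that
       * occupies two value slots.
       */
      uint64_t indexes[] = { 0x1111, 0x2222 };
      uint32_t value_indexes[] = { 0, 1 };
      uint64_t values[] = { 0xaa, 0xbb, 0xcc };

      DemoCPU cpu = {
          .cpreg_indexes = indexes,
          .cpreg_value_indexes = value_indexes,
          .cpreg_values = values,
          .cpreg_array_len = 2,
          .cpreg_value_array_len = 3,
      };

      for (int i = 0; i < cpu.cpreg_array_len; i++) {
          printf("reg %d -> 0x%" PRIx64 "\n", i, demo_read_cpreg(&cpu, i));
      }
      return 0;
  }

With fixed 8-byte registers (the TCG case) cpreg_value_indexes[i] is
simply i, so the indirect lookup collapses to the old direct one; a
larger pseudo register merely claims additional slots in @cpreg_values.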

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 target/arm/cpu.h    | 12 ++++++++++--
 target/arm/helper.c | 27 +++++++++++++++++++--------
 2 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 23879de5fa..0129791b3f 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -795,17 +795,25 @@ struct ArchCPU {
      * 64 bit indexes, not CPRegInfo 32 bit indexes)
      */
     uint64_t *cpreg_indexes;
-    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
+    /*
+     * Values of the registers
+     * (cpreg_indexes[i]'s value is cpreg_values[cpreg_value_indexes[i]])
+     */
+    uint32_t *cpreg_value_indexes;
     uint64_t *cpreg_values;
-    /* Length of the indexes, values, reset_values arrays */
+    /* Length of the indexes, value indexes and values arrays */
     int32_t cpreg_array_len;
+    int32_t cpreg_value_array_len;
+
     /* These are used only for migration: incoming data arrives in
      * these fields and is sanity checked in post_load before copying
      * to the working data structures above.
      */
     uint64_t *cpreg_vmstate_indexes;
+    uint32_t *cpreg_vmstate_value_indexes;
     uint64_t *cpreg_vmstate_values;
     int32_t cpreg_vmstate_array_len;
+    int32_t cpreg_vmstate_value_array_len;
 
     DynamicGDBXMLInfo dyn_sysreg_xml;
     DynamicGDBXMLInfo dyn_svereg_xml;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7d14650615..e8cb4a9edb 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -163,7 +163,7 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
              * item in the list, we just recheck "does the raw write we must
              * have made in write_list_to_cpustate() read back OK" here.
              */
-            uint64_t oldval = cpu->cpreg_values[i];
+            uint64_t oldval = cpu->cpreg_values[cpu->cpreg_value_indexes[i]];
 
             if (oldval == newval) {
                 continue;
@@ -176,7 +176,7 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
 
             write_raw_cp_reg(&cpu->env, ri, newval);
         }
-        cpu->cpreg_values[i] = newval;
+        cpu->cpreg_values[cpu->cpreg_value_indexes[i]] = newval;
     }
     return ok;
 }
@@ -188,7 +188,7 @@ bool write_list_to_cpustate(ARMCPU *cpu)
 
     for (i = 0; i < cpu->cpreg_array_len; i++) {
         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
-        uint64_t v = cpu->cpreg_values[i];
+        uint64_t v = cpu->cpreg_values[cpu->cpreg_value_indexes[i]];
         const ARMCPRegInfo *ri;
 
         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
@@ -222,8 +222,12 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
 
     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
         cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
+        cpu->cpreg_value_indexes[cpu->cpreg_array_len] =
+            cpu->cpreg_value_array_len;
+
         /* The value array need not be initialized at this point */
         cpu->cpreg_array_len++;
+        cpu->cpreg_value_array_len++;
     }
 }
 
@@ -238,6 +242,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
 
     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
         cpu->cpreg_array_len++;
+        cpu->cpreg_value_array_len++;
     }
 }
 
@@ -261,26 +266,32 @@ void init_cpreg_list(ARMCPU *cpu)
      * Note that we require cpreg_tuples[] to be sorted by key ID.
      */
     GList *keys;
-    int arraylen;
+    int arraylen, value_arraylen;
 
     keys = g_hash_table_get_keys(cpu->cp_regs);
     keys = g_list_sort(keys, cpreg_key_compare);
 
     cpu->cpreg_array_len = 0;
-
+    cpu->cpreg_value_array_len = 0;
     g_list_foreach(keys, count_cpreg, cpu);
 
     arraylen = cpu->cpreg_array_len;
+    value_arraylen = cpu->cpreg_value_array_len;
     cpu->cpreg_indexes = g_new(uint64_t, arraylen);
-    cpu->cpreg_values = g_new(uint64_t, arraylen);
+    cpu->cpreg_value_indexes = g_new(uint32_t, arraylen);
+    cpu->cpreg_values = g_new(uint64_t, value_arraylen);
     cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
-    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
+    cpu->cpreg_vmstate_value_indexes = g_new(uint32_t, arraylen);
+    cpu->cpreg_vmstate_values = g_new(uint64_t, value_arraylen);
     cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
-    cpu->cpreg_array_len = 0;
+    cpu->cpreg_vmstate_value_array_len = cpu->cpreg_value_array_len;
 
+    cpu->cpreg_array_len = 0;
+    cpu->cpreg_value_array_len = 0;
     g_list_foreach(keys, add_cpreg_to_list, cpu);
 
     assert(cpu->cpreg_array_len == arraylen);
+    assert(cpu->cpreg_value_array_len == value_arraylen);
 
     g_list_free(keys);
 }
-- 
2.23.0



