From: Richard Henderson
Subject: [Qemu-devel] [PATCH v2 2/9] target/arm: Convert v8 extensions from feature bits to isar tests
Date: Thu, 27 Sep 2018 14:13:15 -0700

Most of the v8 extensions are self-contained within the ISAR
registers and are not implied by other feature bits, which
makes them the easiest to convert.

Signed-off-by: Richard Henderson <address@hidden>
---
 target/arm/cpu.h           | 123 +++++++++++++++++++++++++++++++++----
 target/arm/translate-a64.h |  20 ++++++
 target/arm/translate.h     |  16 +++++
 linux-user/elfload.c       |  46 ++++++++------
 target/arm/cpu.c           |  18 +++---
 target/arm/cpu64.c         |  41 +++++++------
 target/arm/translate-a64.c | 100 +++++++++++++++---------------
 target/arm/translate.c     |  35 +++++------
 8 files changed, 272 insertions(+), 127 deletions(-)
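
As background for reviewers: the pattern used throughout this conversion is to read a small unsigned field out of an ID register and compare it against a threshold, instead of consulting a standalone ARM_FEATURE_* bit. A minimal standalone sketch of that idea follows; it is illustrative only and not QEMU code. extract64() below is a simplified stand-in for the FIELD_EX64() macro, and the AES field position (bits [7:4] of ID_AA64ISAR0_EL1) is quoted from the Arm ARM, so verify against the spec rather than this note.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for FIELD_EX64(): extract 'length' bits at 'start'. */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

/* ID_AA64ISAR0_EL1.AES occupies bits [7:4] (per the Arm ARM). */
#define AES_SHIFT   4
#define AES_LENGTH  4

static bool feature_aes(uint64_t id_aa64isar0)
{
    /* Nonzero means AESE/AESD/AESMC/AESIMC are implemented. */
    return extract64(id_aa64isar0, AES_SHIFT, AES_LENGTH) != 0;
}

static bool feature_pmull(uint64_t id_aa64isar0)
{
    /* A value of 2 or more additionally implies PMULL/PMULL2. */
    return extract64(id_aa64isar0, AES_SHIFT, AES_LENGTH) > 1;
}

int main(void)
{
    uint64_t isar0 = 2ULL << AES_SHIFT;  /* pretend AES + PMULL present */

    printf("aes=%d pmull=%d\n", feature_aes(isar0), feature_pmull(isar0));
    return 0;
}

This one-field-many-thresholds shape is why aa64_feature_pmull() tests the AES field for > 1 while aa64_feature_aes() only tests it for != 0.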

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index e1b9270b8c..1e3c4650ce 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1550,30 +1550,18 @@ enum arm_features {
     ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
     ARM_FEATURE_V8,
     ARM_FEATURE_AARCH64, /* supports 64 bit mode */
-    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
     ARM_FEATURE_CBAR, /* has cp15 CBAR */
     ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
     ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
     ARM_FEATURE_EL2, /* has EL2 Virtualization support */
     ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
-    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
     ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
     ARM_FEATURE_PMU, /* has PMU support */
     ARM_FEATURE_VBAR, /* has cp15 VBAR */
     ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
     ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
     ARM_FEATURE_SVE, /* has Scalable Vector Extension */
-    ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */
-    ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */
-    ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */
     ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
-    ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions.  */
     ARM_FEATURE_M_MAIN, /* M profile Main Extension */
 };
 
@@ -3120,4 +3108,115 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c.  */
 extern const uint64_t pred_esz_masks[4];
 
+/*
+ * 32-bit feature tests via id registers.
+ */
+static inline bool aa32_feature_aes(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar5, ID_ISAR5, AES) != 0;
+}
+
+static inline bool aa32_feature_pmull(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar5, ID_ISAR5, AES) > 1;
+}
+
+static inline bool aa32_feature_sha1(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar5, ID_ISAR5, SHA1) != 0;
+}
+
+static inline bool aa32_feature_sha2(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar5, ID_ISAR5, SHA2) != 0;
+}
+
+static inline bool aa32_feature_crc32(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar5, ID_ISAR5, CRC32) != 0;
+}
+
+static inline bool aa32_feature_rdm(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar5, ID_ISAR5, RDM) != 0;
+}
+
+static inline bool aa32_feature_vcma(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar5, ID_ISAR5, VCMA) != 0;
+}
+
+static inline bool aa32_feature_dp(ARMCPU *cpu)
+{
+    return FIELD_EX32(cpu->id_isar6, ID_ISAR6, DP) != 0;
+}
+
+/*
+ * 64-bit feature tests via id registers.
+ */
+static inline bool aa64_feature_aes(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
+}
+
+static inline bool aa64_feature_pmull(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
+}
+
+static inline bool aa64_feature_sha1(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
+}
+
+static inline bool aa64_feature_sha256(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
+}
+
+static inline bool aa64_feature_sha512(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
+}
+
+static inline bool aa64_feature_crc32(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
+}
+
+static inline bool aa64_feature_atomics(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
+}
+
+static inline bool aa64_feature_rdm(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
+}
+
+static inline bool aa64_feature_sha3(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
+}
+
+static inline bool aa64_feature_sm3(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
+}
+
+static inline bool aa64_feature_sm4(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
+}
+
+static inline bool aa64_feature_dp(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
+}
+
+static inline bool aa64_feature_fcma(ARMCPU *cpu)
+{
+    return FIELD_EX64(cpu->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
+}
+
 #endif
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index 63d958cf50..b4ef9eb024 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -123,4 +123,24 @@ typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
 typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                         uint32_t, uint32_t, uint32_t);
 
+#define FORWARD_FEATURE(NAME) \
+    static inline bool aa64_dc_feature_##NAME(DisasContext *dc) \
+    { return aa64_feature_##NAME(dc->cpu); }
+
+FORWARD_FEATURE(aes)
+FORWARD_FEATURE(pmull)
+FORWARD_FEATURE(sha1)
+FORWARD_FEATURE(sha256)
+FORWARD_FEATURE(sha512)
+FORWARD_FEATURE(crc32)
+FORWARD_FEATURE(atomics)
+FORWARD_FEATURE(rdm)
+FORWARD_FEATURE(sha3)
+FORWARD_FEATURE(sm3)
+FORWARD_FEATURE(sm4)
+FORWARD_FEATURE(dp)
+FORWARD_FEATURE(fcma)
+
+#undef FORWARD_FEATURE
+
 #endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 45f04244be..baacfd7e6b 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -7,6 +7,7 @@
 /* internal defines */
 typedef struct DisasContext {
     DisasContextBase base;
+    ARMCPU *cpu;  /* for access to the id_* registers */
 
     target_ulong pc;
     target_ulong page_start;
@@ -189,4 +190,19 @@ static inline TCGv_i32 get_ahp_flag(void)
     return ret;
 }
 
+#define FORWARD_FEATURE(NAME) \
+    static inline bool aa32_dc_feature_##NAME(DisasContext *dc) \
+    { return aa32_feature_##NAME(dc->cpu); }
+
+FORWARD_FEATURE(aes)
+FORWARD_FEATURE(pmull)
+FORWARD_FEATURE(sha1)
+FORWARD_FEATURE(sha2)
+FORWARD_FEATURE(crc32)
+FORWARD_FEATURE(rdm)
+FORWARD_FEATURE(vcma)
+FORWARD_FEATURE(dp)
+
+#undef FORWARD_FEATURE
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index e97c4cde49..408bf67206 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -458,6 +458,10 @@ static uint32_t get_elf_hwcap(void)
     /* probe for the extra features */
 #define GET_FEATURE(feat, hwcap) \
     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (aa32_feature_##feat(cpu)) { hwcaps |= hwcap; } } while (0)
+
     /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
     GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
     GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
@@ -485,15 +489,16 @@ static uint32_t get_elf_hwcap2(void)
     ARMCPU *cpu = ARM_CPU(thread_cpu);
     uint32_t hwcaps = 0;
 
-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
+    GET_FEATURE_ID(aes, ARM_HWCAP2_ARM_AES);
+    GET_FEATURE_ID(pmull, ARM_HWCAP2_ARM_PMULL);
+    GET_FEATURE_ID(sha1, ARM_HWCAP2_ARM_SHA1);
+    GET_FEATURE_ID(sha2, ARM_HWCAP2_ARM_SHA2);
+    GET_FEATURE_ID(crc32, ARM_HWCAP2_ARM_CRC32);
     return hwcaps;
 }
 
 #undef GET_FEATURE
+#undef GET_FEATURE_ID
 
 #else
 /* 64 bit ARM definitions */
@@ -570,23 +575,28 @@ static uint32_t get_elf_hwcap(void)
     /* probe for the extra features */
 #define GET_FEATURE(feat, hwcap) \
     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
-    GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
-    GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
-    GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
-    GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (aa64_feature_##feat(cpu)) { hwcaps |= hwcap; } } while (0)
+
+    GET_FEATURE_ID(aes, ARM_HWCAP_A64_AES);
+    GET_FEATURE_ID(pmull, ARM_HWCAP_A64_PMULL);
+    GET_FEATURE_ID(sha1, ARM_HWCAP_A64_SHA1);
+    GET_FEATURE_ID(sha256, ARM_HWCAP_A64_SHA2);
+    GET_FEATURE_ID(sha512, ARM_HWCAP_A64_SHA512);
+    GET_FEATURE_ID(crc32, ARM_HWCAP_A64_CRC32);
+    GET_FEATURE_ID(sha3, ARM_HWCAP_A64_SHA3);
+    GET_FEATURE_ID(sm3, ARM_HWCAP_A64_SM3);
+    GET_FEATURE_ID(sm4, ARM_HWCAP_A64_SM4);
     GET_FEATURE(ARM_FEATURE_V8_FP16,
                 ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
-    GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS);
-    GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
-    GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP);
-    GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
+    GET_FEATURE_ID(atomics, ARM_HWCAP_A64_ATOMICS);
+    GET_FEATURE_ID(rdm, ARM_HWCAP_A64_ASIMDRDM);
+    GET_FEATURE_ID(dp, ARM_HWCAP_A64_ASIMDDP);
+    GET_FEATURE_ID(fcma, ARM_HWCAP_A64_FCMA);
     GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE);
+
 #undef GET_FEATURE
+#undef GET_FEATURE_ID
 
     return hwcaps;
 }
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index b5e61cc177..17c9c43f41 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1827,17 +1827,17 @@ static void arm_max_initfn(Object *obj)
         cortex_a15_initfn(obj);
 #ifdef CONFIG_USER_ONLY
         /* We don't set these in system emulation mode for the moment,
-         * since we don't correctly set the ID registers to advertise them,
+         * since we don't correctly set (all of) the ID registers to
+         * advertise them.
          */
         set_feature(&cpu->env, ARM_FEATURE_V8);
-        set_feature(&cpu->env, ARM_FEATURE_V8_AES);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
-        set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
-        set_feature(&cpu->env, ARM_FEATURE_CRC);
-        set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
-        set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
-        set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, AES, 2); /* AES + PMULL */
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, SHA1, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, SHA2, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, CRC32, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, RDM, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, VCMA, 1);
+        cpu->id_isar6 = FIELD_DP32(cpu->id_isar6, ID_ISAR6, DP, 1);
 #endif
     }
 }
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 800bff780e..f9830b67f3 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -109,11 +109,6 @@ static void aarch64_a57_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
-    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
-    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
-    set_feature(&cpu->env, ARM_FEATURE_CRC);
     set_feature(&cpu->env, ARM_FEATURE_EL2);
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     set_feature(&cpu->env, ARM_FEATURE_PMU);
@@ -170,11 +165,6 @@ static void aarch64_a53_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
-    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
-    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
-    set_feature(&cpu->env, ARM_FEATURE_CRC);
     set_feature(&cpu->env, ARM_FEATURE_EL2);
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     set_feature(&cpu->env, ARM_FEATURE_PMU);
@@ -254,6 +244,29 @@ static void aarch64_max_initfn(Object *obj)
         kvm_arm_set_cpu_features_from_host(cpu);
     } else {
         aarch64_a57_initfn(obj);
+
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, SHA1, 1);
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, CRC32, 1);
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, ATOMIC, 2);
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, RDM, 1);
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, SHA3, 1);
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, SM3, 1);
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, SM4, 1);
+        cpu->id_aa64isar0 = FIELD_DP64(cpu->id_aa64isar0, ID_AA64ISAR0, DP, 1);
+
+        cpu->id_aa64isar1 = FIELD_DP64(cpu->id_aa64isar1, ID_AA64ISAR1, FCMA, 1);
+
+        /* Replicate the same data to the 32-bit id registers.  */
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, AES, 2); /* AES + PMULL */
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, SHA1, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, SHA2, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, CRC32, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, RDM, 1);
+        cpu->id_isar5 = FIELD_DP32(cpu->id_isar5, ID_ISAR5, VCMA, 1);
+        cpu->id_isar6 = FIELD_DP32(cpu->id_isar6, ID_ISAR6, DP, 1);
+
 #ifdef CONFIG_USER_ONLY
         /* We don't set these in system emulation mode for the moment,
          * since we don't correctly set the ID registers to advertise them,
@@ -261,15 +274,7 @@ static void aarch64_max_initfn(Object *obj)
          * whereas the architecture requires them to be present in both if
          * present in either.
          */
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA512);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA3);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SM3);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SM4);
-        set_feature(&cpu->env, ARM_FEATURE_V8_ATOMICS);
-        set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
-        set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
         set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
-        set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
         set_feature(&cpu->env, ARM_FEATURE_SVE);
         /* For usermode -cpu max we can use a larger and more efficient DCZ
          * blocksize since we don't have to follow what the hardware does.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 8ca3876707..40575485aa 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2318,7 +2318,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         if (rt2 == 31
             && ((rt | rs) & 1) == 0
-            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+            && aa64_dc_feature_atomics(s)) {
             /* CASP / CASPL */
             gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
             return;
@@ -2340,7 +2340,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         if (rt2 == 31
             && ((rt | rs) & 1) == 0
-            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+            && aa64_dc_feature_atomics(s)) {
             /* CASPA / CASPAL */
             gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
             return;
@@ -2351,7 +2351,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
     case 0xb: /* CASL */
     case 0xe: /* CASA */
     case 0xf: /* CASAL */
-        if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+        if (rt2 == 31 && aa64_dc_feature_atomics(s)) {
             gen_compare_and_swap(s, rs, rt, rn, size);
             return;
         }
@@ -2890,11 +2890,10 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
     int rs = extract32(insn, 16, 5);
     int rn = extract32(insn, 5, 5);
     int o3_opc = extract32(insn, 12, 4);
-    int feature = ARM_FEATURE_V8_ATOMICS;
     TCGv_i64 tcg_rn, tcg_rs;
     AtomicThreeOpFn *fn;
 
-    if (is_vector) {
+    if (is_vector || !aa64_dc_feature_atomics(s)) {
         unallocated_encoding(s);
         return;
     }
@@ -2930,10 +2929,6 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
         unallocated_encoding(s);
         return;
     }
-    if (!arm_dc_feature(s, feature)) {
-        unallocated_encoding(s);
-        return;
-    }
 
     if (rn == 31) {
         gen_check_sp_alignment(s);
@@ -4564,7 +4559,7 @@ static void handle_crc32(DisasContext *s,
     TCGv_i64 tcg_acc, tcg_val;
     TCGv_i32 tcg_bytes;
 
-    if (!arm_dc_feature(s, ARM_FEATURE_CRC)
+    if (!aa64_dc_feature_crc32(s)
         || (sf == 1 && sz != 3)
         || (sf == 0 && sz == 3)) {
         unallocated_encoding(s);
@@ -8608,7 +8603,7 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
     bool u = extract32(insn, 29, 1);
     TCGv_i32 ele1, ele2, ele3;
     TCGv_i64 res;
-    int feature;
+    bool feature;
 
     switch (u * 16 + opcode) {
     case 0x10: /* SQRDMLAH (vector) */
@@ -8617,13 +8612,13 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_RDM;
+        feature = aa64_dc_feature_rdm(s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -10352,7 +10347,7 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
             return;
         }
         if (size == 3) {
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+            if (!aa64_dc_feature_pmull(s)) {
                 unallocated_encoding(s);
                 return;
             }
@@ -11404,7 +11399,8 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
     int size = extract32(insn, 22, 2);
     bool u = extract32(insn, 29, 1);
     bool is_q = extract32(insn, 30, 1);
-    int feature, rot;
+    bool feature;
+    int rot;
 
     switch (u * 16 + opcode) {
     case 0x10: /* SQRDMLAH (vector) */
@@ -11413,7 +11409,7 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_RDM;
+        feature = aa64_dc_feature_rdm(s);
         break;
     case 0x02: /* SDOT (vector) */
     case 0x12: /* UDOT (vector) */
@@ -11421,7 +11417,7 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_DOTPROD;
+        feature = aa64_dc_feature_dp(s);
         break;
     case 0x18: /* FCMLA, #0 */
     case 0x19: /* FCMLA, #90 */
@@ -11435,13 +11431,13 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_FCMA;
+        feature = aa64_dc_feature_fcma(s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -12655,14 +12651,14 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         break;
     case 0x1d: /* SQRDMLAH */
     case 0x1f: /* SQRDMLSH */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+        if (!aa64_dc_feature_rdm(s)) {
             unallocated_encoding(s);
             return;
         }
         break;
     case 0x0e: /* SDOT */
     case 0x1e: /* UDOT */
-        if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+        if (size != MO_32 || !aa64_dc_feature_dp(s)) {
             unallocated_encoding(s);
             return;
         }
@@ -12671,7 +12667,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
     case 0x13: /* FCMLA #90 */
     case 0x15: /* FCMLA #180 */
     case 0x17: /* FCMLA #270 */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
+        if (!aa64_dc_feature_fcma(s)) {
             unallocated_encoding(s);
             return;
         }
@@ -13198,8 +13194,7 @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn)
     TCGv_i32 tcg_decrypt;
     CryptoThreeOpIntFn *genfn;
 
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
-        || size != 0) {
+    if (!aa64_dc_feature_aes(s) || size != 0) {
         unallocated_encoding(s);
         return;
     }
@@ -13256,7 +13251,7 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
     int rd = extract32(insn, 0, 5);
     CryptoThreeOpFn *genfn;
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
-    int feature = ARM_FEATURE_V8_SHA256;
+    bool feature;
 
     if (size != 0) {
         unallocated_encoding(s);
@@ -13269,23 +13264,26 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
     case 2: /* SHA1M */
     case 3: /* SHA1SU0 */
         genfn = NULL;
-        feature = ARM_FEATURE_V8_SHA1;
+        feature = aa64_dc_feature_sha1(s);
         break;
     case 4: /* SHA256H */
         genfn = gen_helper_crypto_sha256h;
+        feature = aa64_dc_feature_sha256(s);
         break;
     case 5: /* SHA256H2 */
         genfn = gen_helper_crypto_sha256h2;
+        feature = aa64_dc_feature_sha256(s);
         break;
     case 6: /* SHA256SU1 */
         genfn = gen_helper_crypto_sha256su1;
+        feature = aa64_dc_feature_sha256(s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }
 
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -13326,7 +13324,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
     CryptoTwoOpFn *genfn;
-    int feature;
+    bool feature;
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
 
     if (size != 0) {
@@ -13336,15 +13334,15 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
 
     switch (opcode) {
     case 0: /* SHA1H */
-        feature = ARM_FEATURE_V8_SHA1;
+        feature = aa64_dc_feature_sha1(s);
         genfn = gen_helper_crypto_sha1h;
         break;
     case 1: /* SHA1SU1 */
-        feature = ARM_FEATURE_V8_SHA1;
+        feature = aa64_dc_feature_sha1(s);
         genfn = gen_helper_crypto_sha1su1;
         break;
     case 2: /* SHA256SU0 */
-        feature = ARM_FEATURE_V8_SHA256;
+        feature = aa64_dc_feature_sha256(s);
         genfn = gen_helper_crypto_sha256su0;
         break;
     default:
@@ -13352,7 +13350,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
         return;
     }
 
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -13383,40 +13381,40 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
     int rm = extract32(insn, 16, 5);
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
-    int feature;
+    bool feature;
     CryptoThreeOpFn *genfn;
 
     if (o == 0) {
         switch (opcode) {
         case 0: /* SHA512H */
-            feature = ARM_FEATURE_V8_SHA512;
+            feature = aa64_dc_feature_sha512(s);
             genfn = gen_helper_crypto_sha512h;
             break;
         case 1: /* SHA512H2 */
-            feature = ARM_FEATURE_V8_SHA512;
+            feature = aa64_dc_feature_sha512(s);
             genfn = gen_helper_crypto_sha512h2;
             break;
         case 2: /* SHA512SU1 */
-            feature = ARM_FEATURE_V8_SHA512;
+            feature = aa64_dc_feature_sha512(s);
             genfn = gen_helper_crypto_sha512su1;
             break;
         case 3: /* RAX1 */
-            feature = ARM_FEATURE_V8_SHA3;
+            feature = aa64_dc_feature_sha3(s);
             genfn = NULL;
             break;
         }
     } else {
         switch (opcode) {
         case 0: /* SM3PARTW1 */
-            feature = ARM_FEATURE_V8_SM3;
+            feature = aa64_dc_feature_sm3(s);
             genfn = gen_helper_crypto_sm3partw1;
             break;
         case 1: /* SM3PARTW2 */
-            feature = ARM_FEATURE_V8_SM3;
+            feature = aa64_dc_feature_sm3(s);
             genfn = gen_helper_crypto_sm3partw2;
             break;
         case 2: /* SM4EKEY */
-            feature = ARM_FEATURE_V8_SM4;
+            feature = aa64_dc_feature_sm4(s);
             genfn = gen_helper_crypto_sm4ekey;
             break;
         default:
@@ -13425,7 +13423,7 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
         }
     }
 
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -13484,16 +13482,16 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
-    int feature;
+    bool feature;
     CryptoTwoOpFn *genfn;
 
     switch (opcode) {
     case 0: /* SHA512SU0 */
-        feature = ARM_FEATURE_V8_SHA512;
+        feature = aa64_dc_feature_sha512(s);
         genfn = gen_helper_crypto_sha512su0;
         break;
     case 1: /* SM4E */
-        feature = ARM_FEATURE_V8_SM4;
+        feature = aa64_dc_feature_sm4(s);
         genfn = gen_helper_crypto_sm4e;
         break;
     default:
@@ -13501,7 +13499,7 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
         return;
     }
 
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -13532,22 +13530,22 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
     int ra = extract32(insn, 10, 5);
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
-    int feature;
+    bool feature;
 
     switch (op0) {
     case 0: /* EOR3 */
     case 1: /* BCAX */
-        feature = ARM_FEATURE_V8_SHA3;
+        feature = aa64_dc_feature_sha3(s);
         break;
     case 2: /* SM3SS1 */
-        feature = ARM_FEATURE_V8_SM3;
+        feature = aa64_dc_feature_sm3(s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }
 
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -13634,7 +13632,7 @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
     TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
     int pass;
 
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
+    if (!aa64_dc_feature_sha3(s)) {
         unallocated_encoding(s);
         return;
     }
@@ -13680,7 +13678,7 @@ static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
     TCGv_i32 tcg_imm2, tcg_opcode;
 
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
+    if (!aa64_dc_feature_sm3(s)) {
         unallocated_encoding(s);
         return;
     }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index c6a5d2ac44..06d61b1e0d 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5660,7 +5660,7 @@ static const uint8_t neon_2rm_sizes[] = {
 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                          int q, int rd, int rn, int rm)
 {
-    if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+    if (aa32_dc_feature_rdm(s)) {
         int opr_sz = (1 + q) * 8;
         tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                            vfp_reg_offset(1, rn),
@@ -5734,7 +5734,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 return 1;
             }
             if (!u) { /* SHA-1 */
-                if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+                if (!aa32_dc_feature_sha1(s)) {
                     return 1;
                 }
                 ptr1 = vfp_reg_ptr(true, rd);
@@ -5744,7 +5744,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                 tcg_temp_free_i32(tmp4);
             } else { /* SHA-256 */
-                if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
+                if (!aa32_dc_feature_sha2(s) || size == 3) {
                     return 1;
                 }
                 ptr1 = vfp_reg_ptr(true, rd);
@@ -6739,7 +6739,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 if (op == 14 && size == 2) {
                     TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
 
-                    if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+                    if (!aa32_dc_feature_pmull(s)) {
                         return 1;
                     }
                     tcg_rn = tcg_temp_new_i64();
@@ -7056,7 +7056,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     {
                         NeonGenThreeOpEnvFn *fn;
 
-                        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+                        if (!aa32_dc_feature_rdm(s)) {
                             return 1;
                         }
                         if (u && ((rd | rn) & 1)) {
@@ -7330,8 +7330,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     break;
                 }
                 case NEON_2RM_AESE: case NEON_2RM_AESMC:
-                    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
-                        || ((rm | rd) & 1)) {
+                    if (!aa32_dc_feature_aes(s) || ((rm | rd) & 1)) {
                         return 1;
                     }
                     ptr1 = vfp_reg_ptr(true, rd);
@@ -7352,8 +7351,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     tcg_temp_free_i32(tmp3);
                     break;
                 case NEON_2RM_SHA1H:
-                    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
-                        || ((rm | rd) & 1)) {
+                    if (!aa32_dc_feature_sha1(s) || ((rm | rd) & 1)) {
                         return 1;
                     }
                     ptr1 = vfp_reg_ptr(true, rd);
@@ -7370,10 +7368,10 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     }
                     /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
                     if (q) {
-                        if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
+                        if (!aa32_dc_feature_sha2(s)) {
                             return 1;
                         }
-                    } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+                    } else if (!aa32_dc_feature_sha1(s)) {
                         return 1;
                     }
                     ptr1 = vfp_reg_ptr(true, rd);
@@ -7784,7 +7782,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
         /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
         int size = extract32(insn, 20, 1);
         data = extract32(insn, 23, 2); /* rot */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
+        if (!aa32_dc_feature_vcma(s)
             || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
             return 1;
         }
@@ -7793,7 +7791,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
         /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
         int size = extract32(insn, 20, 1);
         data = extract32(insn, 24, 1); /* rot */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
+        if (!aa32_dc_feature_vcma(s)
             || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
             return 1;
         }
@@ -7801,7 +7799,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
     } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
         /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
         bool u = extract32(insn, 4, 1);
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+        if (!aa32_dc_feature_dp(s)) {
             return 1;
         }
         fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
@@ -7863,7 +7861,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
         int size = extract32(insn, 23, 1);
         int index;
 
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
+        if (!aa32_dc_feature_vcma(s)) {
             return 1;
         }
         if (size == 0) {
@@ -7884,7 +7882,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
     } else if ((insn & 0xffb00f00) == 0xfe200d00) {
         /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
         int u = extract32(insn, 4, 1);
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+        if (!aa32_dc_feature_dp(s)) {
             return 1;
         }
         fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
@@ -8860,8 +8858,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
              * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
              * Bits 8, 10 and 11 should be zero.
              */
-            if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
-                (c & 0xd) != 0) {
+            if (!aa32_dc_feature_crc32(s) || op1 == 0x3 || (c & 0xd) != 0) {
                 goto illegal_op;
             }
 
@@ -10706,7 +10703,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                 case 0x28:
                 case 0x29:
                 case 0x2a:
-                    if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
+                    if (!aa32_dc_feature_crc32(s)) {
                         goto illegal_op;
                     }
                     break;
-- 
2.17.1



