From: Richard Henderson <richard.henderson@linaro.org>
Subject: [PULL 39/80] tcg/aarch64: Use atom_and_align_for_opc
Date: Tue, 16 May 2023 12:41:04 -0700

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target.c.inc | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 0cc719d799..ea4108d59c 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1593,6 +1593,7 @@ typedef struct {
TCGReg base;
TCGReg index;
TCGType index_ext;
+ TCGAtomAlign aa;
} HostAddress;
bool tcg_target_has_memory_bswap(MemOp memop)
@@ -1646,8 +1647,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
- unsigned a_bits = get_alignment_bits(opc);
- unsigned a_mask = (1u << a_bits) - 1;
+ unsigned a_mask;
+
+ h->aa = atom_and_align_for_opc(s, opc,
+ have_lse2 ? MO_ATOM_WITHIN16
+ : MO_ATOM_IFALIGN,
+ false);
+ a_mask = (1 << h->aa.align) - 1;
#ifdef CONFIG_SOFTMMU
unsigned s_bits = opc & MO_SIZE;
@@ -1693,7 +1699,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* bits within the address. For unaligned access, we check that we don't
* cross pages using the address of the last byte of the access.
*/
- if (a_bits >= s_bits) {
+ if (a_mask >= s_mask) {
x3 = addr_reg;
} else {
tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
@@ -1713,11 +1719,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
- *h = (HostAddress){
- .base = TCG_REG_X1,
- .index = addr_reg,
- .index_ext = addr_type
- };
+ h->base = TCG_REG_X1;
+ h->index = addr_reg;
+ h->index_ext = addr_type;
#else
if (a_mask) {
ldst = new_ldst_label(s);
@@ -1735,17 +1739,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
}
if (USE_GUEST_BASE) {
- *h = (HostAddress){
- .base = TCG_REG_GUEST_BASE,
- .index = addr_reg,
- .index_ext = addr_type
- };
+ h->base = TCG_REG_GUEST_BASE;
+ h->index = addr_reg;
+ h->index_ext = addr_type;
} else {
- *h = (HostAddress){
- .base = addr_reg,
- .index = TCG_REG_XZR,
- .index_ext = TCG_TYPE_I64
- };
+ h->base = addr_reg;
+ h->index = TCG_REG_XZR;
+ h->index_ext = TCG_TYPE_I64;
}
#endif
--
2.34.1
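
For readers unfamiliar with the helper: the patch shows that atom_and_align_for_opc
returns a TCGAtomAlign whose .align field is a log2 alignment, which is why the mask
is derived as a_mask = (1 << h->aa.align) - 1, and why comparing masks
(a_mask >= s_mask) is equivalent to the old comparison of bit counts
(a_bits >= s_bits): x -> (1 << x) - 1 is monotonic. The standalone C sketch below
illustrates just that arithmetic; the struct and helper names are hypothetical
stand-ins, not QEMU APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for TCGAtomAlign: both fields are log2 values. */
typedef struct {
    unsigned atom;   /* log2 of the atomicity granule */
    unsigned align;  /* log2 of the required alignment */
} AtomAlignSketch;

/* Mask of the low address bits that must be clear; mirrors
   a_mask = (1 << h->aa.align) - 1 in the patch. */
static uint64_t low_mask(unsigned log2)
{
    return (1ull << log2) - 1;
}

static bool is_aligned(uint64_t addr, unsigned align_log2)
{
    return (addr & low_mask(align_log2)) == 0;
}

int main(void)
{
    AtomAlignSketch aa = { .atom = 3, .align = 2 };  /* 8-byte atom, 4-byte align */
    unsigned s_bits = 3;                             /* log2 of an 8-byte access */

    /* Since x -> (1 << x) - 1 is monotonic, comparing the masks is
       the same test as comparing the log2 values themselves. */
    printf("%d\n", low_mask(aa.align) >= low_mask(s_bits)); /* 0: align < size */
    printf("%d\n", is_aligned(0x1000, aa.align));           /* 1: aligned */
    printf("%d\n", is_aligned(0x1002, aa.align));           /* 0: low bits set */
    return 0;
}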