[PATCH v2 18/24] target/arm: Use finalize_memop for aa64 fpr load/store
From: Richard Henderson <richard.henderson@linaro.org>
Subject: [PATCH v2 18/24] target/arm: Use finalize_memop for aa64 fpr load/store
Date: Tue, 8 Dec 2020 12:01:12 -0600
For 128-bit load/store, use 16-byte alignment.  This requires that we
perform the two operations in the correct order so that we generate
the alignment fault before modifying memory.
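
As a minimal illustration of that ordering constraint, here is a plain
C sketch (not the TCG code itself; store128() and its assert are
hypothetical stand-ins for the generated accesses and the alignment
fault):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical model of a 128-bit store split into two 64-bit
     * accesses.  Only the first access carries the 16-byte alignment
     * check (MO_ALIGN_16 in the patch), so it must be issued first;
     * otherwise a misaligned address would fault only after half of
     * the data had already been written to memory. */
    static void store128(uint8_t *mem, uintptr_t addr,
                         uint64_t first, uint64_t second)
    {
        assert(addr % 16 == 0);              /* stands in for the fault */
        memcpy(mem + addr, &first, 8);       /* checked access at addr */
        memcpy(mem + addr + 8, &second, 8);  /* unchecked, at addr + 8 */
    }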
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-a64.c | 42 +++++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 16 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index d34ec892c6..152a0a37ab 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -957,25 +957,33 @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
 {
     /* This writes the bottom N bits of a 128 bit wide vector to memory */
-    TCGv_i64 tmp = tcg_temp_new_i64();
-    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+    TCGv_i64 tmplo = tcg_temp_new_i64();
+    MemOp mop;
+
+    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+
     if (size < 4) {
-        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
-                            s->be_data + size);
+        mop = finalize_memop(s, size);
+        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         bool be = s->be_data == MO_BE;
         TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
+        TCGv_i64 tmphi = tcg_temp_new_i64();
 
+        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
+
+        mop = s->be_data | MO_Q;
+        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
         tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
-        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
-                            s->be_data | MO_Q);
-        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
-        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
-                            s->be_data | MO_Q);
+        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
+                            get_mem_index(s), mop);
+
         tcg_temp_free_i64(tcg_hiaddr);
+        tcg_temp_free_i64(tmphi);
     }
 
-    tcg_temp_free_i64(tmp);
+    tcg_temp_free_i64(tmplo);
 }
 
 /*
@@ -986,10 +994,11 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
     /* This always zero-extends and writes to a full 128 bit wide vector */
     TCGv_i64 tmplo = tcg_temp_new_i64();
     TCGv_i64 tmphi = NULL;
+    MemOp mop;
 
     if (size < 4) {
-        MemOp memop = s->be_data + size;
-        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
+        mop = finalize_memop(s, size);
+        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         bool be = s->be_data == MO_BE;
         TCGv_i64 tcg_hiaddr;
@@ -997,11 +1006,12 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
         tmphi = tcg_temp_new_i64();
         tcg_hiaddr = tcg_temp_new_i64();
 
+        mop = s->be_data | MO_Q;
+        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
         tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
-        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
-                            s->be_data | MO_Q);
-        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
-                            s->be_data | MO_Q);
+        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
+                            get_mem_index(s), mop);
         tcg_temp_free_i64(tcg_hiaddr);
     }
 
--
2.25.1
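
For reference, the finalize_memop() helper relied on above was
introduced earlier in this series in target/arm/translate.h; its
definition is approximately the following (a sketch of the helper,
not code repeated in this patch):

    /* Fold the configured endianness into the MemOp, and request
     * natural alignment when alignment checking is enabled and the
     * operation does not already carry an explicit alignment. */
    static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
    {
        if (s->align_mem && !(opc & MO_AMASK)) {
            opc |= MO_ALIGN;
        }
        return opc | s->be_data;
    }

Note that the 16-byte paths above do not go through finalize_memop():
they OR in MO_ALIGN_16 by hand on the first 8-byte access only, since
the second access at addr + 8 must not repeat the 16-byte check.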