[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 12/42] target/alpha: Use MO_ALIGN where required
From: Richard Henderson
Subject: [PULL 12/42] target/alpha: Use MO_ALIGN where required
Date: Fri, 5 May 2023 22:24:17 +0100
Mark all memory operations that are not already marked with UNALIGN.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index ffbac1c114..be8adb2526 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -2399,21 +2399,21 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
@@ -2438,11 +2438,13 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2453,12 +2455,14 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
}
break;
@@ -2659,7 +2663,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access */
@@ -2667,17 +2671,17 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LESL);
+ MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LEUQ);
+ MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x4:
/* Longword virtual access */
@@ -2771,11 +2775,11 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2A:
/* LDL_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
break;
case 0x2B:
/* LDQ_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
break;
case 0x2C:
/* STL */
@@ -2788,12 +2792,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LESL);
+ ctx->mem_idx, MO_LESL | MO_ALIGN);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEUQ);
+ ctx->mem_idx, MO_LEUQ | MO_ALIGN);
break;
case 0x30:
/* BR */
--
2.34.1
- [PULL 14/42] target/hppa: Use MO_ALIGN for system UNALIGN(), (continued)
- [PULL 14/42] target/hppa: Use MO_ALIGN for system UNALIGN(), Richard Henderson, 2023/05/05
- [PULL 07/42] target/s390x: Finish conversion to tcg_gen_qemu_{ld, st}_*, Richard Henderson, 2023/05/05
- [PULL 16/42] target/sparc: Use MO_ALIGN where required, Richard Henderson, 2023/05/05
- [PULL 15/42] target/hppa: Remove TARGET_ALIGNED_ONLY, Richard Henderson, 2023/05/05
- [PULL 11/42] target/alpha: Use MO_ALIGN for system UNALIGN(), Richard Henderson, 2023/05/05
- [PULL 08/42] target/sparc: Finish conversion to tcg_gen_qemu_{ld, st}_*, Richard Henderson, 2023/05/05
- [PULL 09/42] target/xtensa: Finish conversion to tcg_gen_qemu_{ld, st}_*, Richard Henderson, 2023/05/05
- [PULL 13/42] target/alpha: Remove TARGET_ALIGNED_ONLY, Richard Henderson, 2023/05/05
- [PULL 18/42] target/sparc: Remove TARGET_ALIGNED_ONLY, Richard Henderson, 2023/05/05
- [PULL 10/42] tcg: Remove compatability helpers for qemu ld/st, Richard Henderson, 2023/05/05
- [PULL 12/42] target/alpha: Use MO_ALIGN where required, Richard Henderson <=
- [PULL 17/42] target/sparc: Use cpu_ld*_code_mmu, Richard Henderson, 2023/05/05
- [PULL 21/42] tcg/i386: Introduce HostAddress, Richard Henderson, 2023/05/05
- [PULL 25/42] tcg/aarch64: Introduce HostAddress, Richard Henderson, 2023/05/05
- [PULL 22/42] tcg/i386: Drop r0+r1 local variables from tcg_out_tlb_load, Richard Henderson, 2023/05/05
- [PULL 28/42] tcg/loongarch64: Rationalize args to tcg_out_qemu_{ld, st}, Richard Henderson, 2023/05/05
- [PULL 31/42] tcg/ppc: Rationalize args to tcg_out_qemu_{ld,st}, Richard Henderson, 2023/05/05
- [PULL 24/42] tcg/aarch64: Rationalize args to tcg_out_qemu_{ld,st}, Richard Henderson, 2023/05/05
- [PULL 26/42] tcg/arm: Rationalize args to tcg_out_qemu_{ld,st}, Richard Henderson, 2023/05/05
- [PULL 29/42] tcg/loongarch64: Introduce HostAddress, Richard Henderson, 2023/05/05
- [PULL 30/42] tcg/mips: Rationalize args to tcg_out_qemu_{ld,st}, Richard Henderson, 2023/05/05