[PULL 13/80] tcg/i386: Use full load/store helpers in user-only mode
From: Richard Henderson
Subject: [PULL 13/80] tcg/i386: Use full load/store helpers in user-only mode
Date: Tue, 16 May 2023 12:40:38 -0700
Instead of using helper_unaligned_{ld,st}, use the full load/store helpers.
This will allow the fast path to increase alignment to implement atomicity
while not immediately raising an alignment exception.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
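A note for readers not steeped in TCG internals: the point of the helper
switch is that a failed alignment check in the fast path is no longer an
error. The fast path may now test a stricter alignment than the guest
instruction requires (so that the host access is atomic), and the slow
path must then complete the access instead of raising SIGBUS. A minimal
standalone sketch of that shape, in plain C with hypothetical names (not
QEMU code):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical stand-in for the full load helper: completes the
     * access for any address, e.g. byte-wise or under a lock. */
    static uint64_t full_ld8_helper(const void *addr)
    {
        uint64_t val;
        memcpy(&val, addr, sizeof(val));  /* always legal, possibly slower */
        return val;
    }

    static uint64_t ld8(const void *addr)
    {
        if (((uintptr_t)addr & 7) == 0) {
            /* Fast path: an aligned host access is naturally atomic. */
            return *(const uint64_t *)addr;
        }
        /* Slow path: not a fault; finish the access via the helper. */
        return full_ld8_helper(addr);
    }

With helper_unaligned_{ld,st}, the second branch could only deliver
SIGBUS; routing it through the full helper is what lets the fast path
demand host-friendly alignment without changing guest-visible behaviour.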
tcg/i386/tcg-target.c.inc | 52 +++------------------------------------
1 file changed, 4 insertions(+), 48 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 911123cfa8..21553f3c39 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1776,7 +1776,6 @@ typedef struct {
     int seg;
 } HostAddress;
 
-#if defined(CONFIG_SOFTMMU)
 /*
  * Because i686 has no register parameters and because x86_64 has xchg
  * to handle addr/data register overlap, we have placed all input arguments
@@ -1812,7 +1811,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     /* resolve label address */
     tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
-    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+    if (label_ptr[1]) {
         tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
     }
 
@@ -1834,7 +1833,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     /* resolve label address */
     tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
-    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+    if (label_ptr[1]) {
         tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
     }
 
@@ -1844,51 +1843,8 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     tcg_out_jmp(s, l->raddr);
     return true;
 }
 
-#else
-static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    /* resolve label address */
-    tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4);
-
-    if (TCG_TARGET_REG_BITS == 32) {
-        int ofs = 0;
-
-        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
-        ofs += 4;
-
-        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
-        ofs += 4;
-        if (TARGET_LONG_BITS == 64) {
-            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
-            ofs += 4;
-        }
-
-        tcg_out_pushi(s, (uintptr_t)l->raddr);
-    } else {
-        tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
-                    l->addrlo_reg);
-        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
-
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr);
-        tcg_out_push(s, TCG_REG_RAX);
-    }
-
-    /* "Tail call" to the helper, with the return address back inline. */
-    tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld
-                                  : helper_unaligned_st));
-    return true;
-}
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
-
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
+#ifndef CONFIG_SOFTMMU
 static HostAddress x86_guest_base = {
     .index = -1
 };
@@ -1920,7 +1876,7 @@ static inline int setup_guest_base_seg(void)
     return 0;
 }
 #endif /* setup_guest_base_seg */
-#endif /* SOFTMMU */
+#endif /* !SOFTMMU */
 
 /*
  * For softmmu, perform the TLB load and compare.
--
2.34.1
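
One more note, on the `if (label_ptr[1])` test that replaces the
TARGET_LONG_BITS comparison: with a single slow path serving both
softmmu and user-only builds, whether the second patch slot was filled
is a property of the code that was actually emitted (a second compare
branch exists only for a 64-bit guest address on a 32-bit host), not of
the build configuration, so the recorded pointer is tested directly. A
tiny sketch of the pattern, with hypothetical types (not QEMU code):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct {
        /* Slots recorded while emitting the fast path; slot 1 stays
         * NULL unless a second branch was emitted. */
        uint8_t *label_ptr[2];
    } LabelSlots;

    /* Backpatch a 32-bit displacement at loc. */
    static void patch32(uint8_t *loc, ptrdiff_t disp)
    {
        int32_t v = (int32_t)disp;
        memcpy(loc, &v, 4);
    }

    static void resolve(LabelSlots *l, uint8_t *code_ptr)
    {
        patch32(l->label_ptr[0], code_ptr - l->label_ptr[0] - 4);
        if (l->label_ptr[1]) {  /* emitted-code property, not build property */
            patch32(l->label_ptr[1], code_ptr - l->label_ptr[1] - 4);
        }
    }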