[PULL v2 09/12] accel/tcg: Add cpu_ld*_code_mmu
From: Richard Henderson
Subject: [PULL v2 09/12] accel/tcg: Add cpu_ld*_code_mmu
Date: Wed, 3 May 2023 08:21:02 +0100
At least RISC-V has the need to be able to perform a read
using execute permissions, outside of translation.
Add helpers to facilitate this.
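As an illustration only (not part of this patch), a target helper that must
read guest memory with execute permission outside of translation could wrap
the new interface roughly as below.  The wrapper name riscv_read_insn() and
its surrounding context are hypothetical; only cpu_ldl_code_mmu(),
make_memop_idx() and cpu_mmu_index() are existing interfaces:

    /* Hypothetical target-side sketch, not part of this patch. */
    #include "cpu.h"              /* CPURISCVState, cpu_mmu_index() */
    #include "exec/memop.h"       /* MO_TEUL */
    #include "exec/memopidx.h"    /* MemOpIdx, make_memop_idx() */
    #include "exec/cpu_ldst.h"    /* cpu_ldl_code_mmu() */

    static uint32_t riscv_read_insn(CPURISCVState *env, target_ulong pc,
                                    uintptr_t ra)
    {
        /* 32-bit target-endian access via the instruction-fetch mmu index. */
        MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));

        /* Faults are reported against 'ra', as for the cpu_ld*_mmu data loads. */
        return cpu_ldl_code_mmu(env, pc, oi, ra);
    }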
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Tested-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-Id: <20230325105429.1142530-9-richard.henderson@linaro.org>
Message-Id: <20230412114333.118895-9-richard.henderson@linaro.org>
---
include/exec/cpu_ldst.h | 9 +++++++
accel/tcg/cputlb.c | 48 ++++++++++++++++++++++++++++++++++
accel/tcg/user-exec.c | 58 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 115 insertions(+)
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 09b55cc0ee..c141f0394f 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -445,6 +445,15 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
# define cpu_stq_mmu cpu_stq_le_mmu
#endif
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+                         MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra);
+
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index efa0cb67c9..c8bd642d0e 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2773,3 +2773,51 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}
+
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+                         MemOpIdx oi, uintptr_t retaddr)
+{
+    return full_ldub_code(env, addr, oi, retaddr);
+}
+
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t retaddr)
+{
+    MemOp mop = get_memop(oi);
+    int idx = get_mmuidx(oi);
+    uint16_t ret;
+
+    ret = full_lduw_code(env, addr, make_memop_idx(MO_TEUW, idx), retaddr);
+    if ((mop & MO_BSWAP) != MO_TE) {
+        ret = bswap16(ret);
+    }
+    return ret;
+}
+
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t retaddr)
+{
+    MemOp mop = get_memop(oi);
+    int idx = get_mmuidx(oi);
+    uint32_t ret;
+
+    ret = full_ldl_code(env, addr, make_memop_idx(MO_TEUL, idx), retaddr);
+    if ((mop & MO_BSWAP) != MO_TE) {
+        ret = bswap32(ret);
+    }
+    return ret;
+}
+
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t retaddr)
+{
+    MemOp mop = get_memop(oi);
+    int idx = get_mmuidx(oi);
+    uint64_t ret;
+
+    ret = full_ldq_code(env, addr, make_memop_idx(MO_TEUQ, idx), retaddr);
+    if ((mop & MO_BSWAP) != MO_TE) {
+        ret = bswap64(ret);
+    }
+    return ret;
+}
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index a7e0c3e2f4..fc597a010d 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1219,6 +1219,64 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
return ret;
}
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+                         MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint8_t ret;
+
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+    ret = ldub_p(haddr);
+    clear_helper_retaddr();
+    return ret;
+}
+
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint16_t ret;
+
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+    ret = lduw_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap16(ret);
+    }
+    return ret;
+}
+
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint32_t ret;
+
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+    ret = ldl_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap32(ret);
+    }
+    return ret;
+}
+
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint64_t ret;
+
+    validate_memop(oi, MO_BEUQ);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldq_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap64(ret);
+    }
+    return ret;
+}
+
#include "ldst_common.c.inc"
/*
--
2.34.1
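A note on the MemOpIdx argument taken by the new helpers: callers encode the
access size, the requested byte order and the mmu index in 'oi'.  The softmmu
implementations above always fetch in target byte order (MO_TEUW/MO_TEUL/
MO_TEUQ) and byte-swap the result afterwards when the requested order differs.
A minimal sketch of that check, restated outside the helpers (illustrative
only; the function name is hypothetical, and MO_TE resolves to MO_BE or MO_LE
depending on TARGET_BIG_ENDIAN):

    #include "exec/memop.h"       /* MO_BSWAP, MO_TE */
    #include "exec/memopidx.h"    /* MemOpIdx, get_memop() */

    /* True when 'oi' asks for the opposite of target byte order, i.e. when
     * the cputlb.c helpers above apply bswap16/32/64 to the fetched value. */
    static inline bool code_fetch_needs_bswap(MemOpIdx oi)
    {
        return (get_memop(oi) & MO_BSWAP) != MO_TE;
    }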