[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 05/16] target/i386: Use explicit little-endian LD/ST API
From: |
Philippe Mathieu-Daudé |
Subject: |
[PATCH 05/16] target/i386: Use explicit little-endian LD/ST API |
Date: |
Fri, 4 Oct 2024 01:42:00 +0200 |
The x86 architecture uses little endianness. Directly use
the little-endian LD/ST API.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
target/i386/gdbstub.c | 26 +++++++++++-----------
target/i386/tcg/sysemu/excp_helper.c | 4 ++--
target/i386/xsave_helper.c | 32 ++++++++++++++--------------
3 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
index 4acf485879e..40c8cb6dc46 100644
--- a/target/i386/gdbstub.c
+++ b/target/i386/gdbstub.c
@@ -89,10 +89,10 @@ static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val)
static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
{
if (hflags & HF_CS64_MASK) {
- *val = ldq_p(buf);
+ *val = ldq_le_p(buf);
return 8;
}
- *val = ldl_p(buf);
+ *val = ldl_le_p(buf);
return 4;
}
@@ -221,7 +221,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
{
CPUX86State *env = &cpu->env;
- uint16_t selector = ldl_p(mem_buf);
+ uint16_t selector = ldl_le_p(mem_buf);
if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
@@ -270,7 +270,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
} else if (n < CPU_NB_REGS32) {
n = gpr_map32[n];
env->regs[n] &= ~0xffffffffUL;
- env->regs[n] |= (uint32_t)ldl_p(mem_buf);
+ env->regs[n] |= (uint32_t)ldl_le_p(mem_buf);
return 4;
}
} else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
@@ -281,8 +281,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
} else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
n -= IDX_XMM_REGS;
if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
- env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
- env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
+ env->xmm_regs[n].ZMM_Q(0) = ldq_le_p(mem_buf);
+ env->xmm_regs[n].ZMM_Q(1) = ldq_le_p(mem_buf + 8);
return 16;
}
} else {
@@ -290,18 +290,18 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
case IDX_IP_REG:
if (TARGET_LONG_BITS == 64) {
if (env->hflags & HF_CS64_MASK) {
- env->eip = ldq_p(mem_buf);
+ env->eip = ldq_le_p(mem_buf);
} else {
- env->eip = ldq_p(mem_buf) & 0xffffffffUL;
+ env->eip = ldq_le_p(mem_buf) & 0xffffffffUL;
}
return 8;
} else {
env->eip &= ~0xffffffffUL;
- env->eip |= (uint32_t)ldl_p(mem_buf);
+ env->eip |= (uint32_t)ldl_le_p(mem_buf);
return 4;
}
case IDX_FLAGS_REG:
- env->eflags = ldl_p(mem_buf);
+ env->eflags = ldl_le_p(mem_buf);
return 4;
case IDX_SEG_REGS:
@@ -327,10 +327,10 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
case IDX_FP_REGS + 8:
- cpu_set_fpuc(env, ldl_p(mem_buf));
+ cpu_set_fpuc(env, ldl_le_p(mem_buf));
return 4;
case IDX_FP_REGS + 9:
- tmp = ldl_p(mem_buf);
+ tmp = ldl_le_p(mem_buf);
env->fpstt = (tmp >> 11) & 7;
env->fpus = tmp & ~0x3800;
return 4;
@@ -348,7 +348,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
case IDX_MXCSR_REG:
- cpu_set_mxcsr(env, ldl_p(mem_buf));
+ cpu_set_mxcsr(env, ldl_le_p(mem_buf));
return 4;
case IDX_CTL_CR0_REG:
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index 8fb05b1f531..de6765099f3 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -86,7 +86,7 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra)
static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
{
if (likely(in->haddr)) {
- return ldl_p(in->haddr);
+ return ldl_le_p(in->haddr);
}
return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}
@@ -94,7 +94,7 @@ static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra)
{
if (likely(in->haddr)) {
- return ldq_p(in->haddr);
+ return ldq_le_p(in->haddr);
}
return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}
diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
index 996e9f3bfef..fc10bfa6718 100644
--- a/target/i386/xsave_helper.c
+++ b/target/i386/xsave_helper.c
@@ -43,8 +43,8 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
uint8_t *xmm = legacy->xmm_regs[i];
- stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
- stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
+ stq_le_p(xmm, env->xmm_regs[i].ZMM_Q(0));
+ stq_le_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
}
header->xstate_bv = env->xstate_bv;
@@ -58,8 +58,8 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
uint8_t *ymmh = avx->ymmh[i];
- stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
- stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
+ stq_le_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+ stq_le_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
}
}
@@ -101,10 +101,10 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
- stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
- stq_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
- stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
- stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
+ stq_le_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+ stq_le_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
+ stq_le_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
+ stq_le_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
}
#ifdef TARGET_X86_64
@@ -177,8 +177,8 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
const uint8_t *xmm = legacy->xmm_regs[i];
- env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
- env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
+ env->xmm_regs[i].ZMM_Q(0) = ldq_le_p(xmm);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_le_p(xmm + 8);
}
env->xstate_bv = header->xstate_bv;
@@ -191,8 +191,8 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
const uint8_t *ymmh = avx->ymmh[i];
- env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
- env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
+ env->xmm_regs[i].ZMM_Q(2) = ldq_le_p(ymmh);
+ env->xmm_regs[i].ZMM_Q(3) = ldq_le_p(ymmh + 8);
}
}
@@ -241,10 +241,10 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
for (i = 0; i < CPU_NB_REGS; i++) {
const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
- env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
- env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
- env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
- env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
+ env->xmm_regs[i].ZMM_Q(4) = ldq_le_p(zmmh);
+ env->xmm_regs[i].ZMM_Q(5) = ldq_le_p(zmmh + 8);
+ env->xmm_regs[i].ZMM_Q(6) = ldq_le_p(zmmh + 16);
+ env->xmm_regs[i].ZMM_Q(7) = ldq_le_p(zmmh + 24);
}
#ifdef TARGET_X86_64
--
2.45.2
- [PATCH 00/16] misc: Use explicit endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 01/16] qemu/bswap: Undefine CPU_CONVERT() once done, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 02/16] exec/memop: Remove unused memop_big_endian() helper, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 03/16] linux-user/i386: Use explicit little-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 04/16] hw/i386: Use explicit little-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 05/16] target/i386: Use explicit little-endian LD/ST API, Philippe Mathieu-Daudé <=
- [PATCH 06/16] hw/m68k: Use explicit big-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 07/16] target/m68k: Use explicit big-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 08/16] hw/ppc/e500: Use explicit big-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 09/16] hw/s390x: Use explicit big-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 10/16] target/s390x: Use explicit big-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03
- [PATCH 11/16] hw/sparc: Use explicit big-endian LD/ST API, Philippe Mathieu-Daudé, 2024/10/03