From: Claudio Fontana
Subject: Re: [Qemu-devel] [PATCH 27/26] tcg-aarch64: Introduce tcg_out_insn_3312, _3310, _3313
Date: Tue, 8 Apr 2014 11:00:23 +0200
User-agent: Mozilla/5.0 (Windows NT 6.1; rv:24.0) Gecko/20100101 Thunderbird/24.0.1

On 07.04.2014 20:34, Richard Henderson wrote:
> Merge TCGMemOp size, AArch64LdstType type and a few stray opcode bits
> into a single I3312_* argument, eliminating some magic numbers from
> helper functions.
> 
> Signed-off-by: Richard Henderson <address@hidden>
> ---
>  tcg/aarch64/tcg-target.c | 129 ++++++++++++++++++++++++++++-------------------
>  1 file changed, 76 insertions(+), 53 deletions(-)
> ---
> 
> I'm not really sure how much clearer this is, especially since we do
> have to re-extract the size within tcg_out_ldst.  But it does at least
> eliminate some of the magic numbers within the helpers.
> 
> Thoughts?

Looks good to me, I'll get to it in more detail later.
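
As a quick sanity check of the bit layout I composed one of the constants by
hand outside the tree (standalone mock-up, with the LDST_* and MO_* values I
assume are still 0..3 as in the current tcg-target.c and tcg.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mock-up of the enum values; mirrors what I believe is in the tree today. */
enum { LDST_ST = 0, LDST_LD = 1, LDST_LD_S_X = 2, LDST_LD_S_W = 3 };
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 };

enum {
    I3312_LDRH      = 0x38000000 | LDST_LD << 22 | MO_16 << 30,
    I3312_TO_I3310  = 0x00206800,  /* -> C3.3.10 register-offset form */
    I3312_TO_I3313  = 0x01000000,  /* -> C3.3.13 scaled-uimm12 form */
};

int main(void)
{
    /* Base constant is the C3.3.12 (9-bit unscaled offset) LDURH opcode. */
    assert(I3312_LDRH == 0x78400000);
    /* Size re-extraction as done at the top of the new tcg_out_ldst. */
    assert((uint32_t)I3312_LDRH >> 30 == MO_16);
    /* The two transforms select the other addressing modes. */
    assert((I3312_LDRH | I3312_TO_I3310) == 0x78606800); /* LDRH Wt,[Xn,Xm] */
    assert((I3312_LDRH | I3312_TO_I3313) == 0x79400000); /* LDRH Wt,[Xn,#imm] */
    printf("I3312_LDRH composition checks out\n");
    return 0;
}

So the base constants are the LDUR/STUR opcodes, and the two OR-in transforms
line up with the register-offset and unsigned-immediate encodings, which
matches the comment in the patch.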

C.

> 
> 
> r~
> 
> 
> 
> diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
> index ab4cd25..324a452 100644
> --- a/tcg/aarch64/tcg-target.c
> +++ b/tcg/aarch64/tcg-target.c
> @@ -271,6 +271,28 @@ typedef enum {
>      I3207_BLR       = 0xd63f0000,
>      I3207_RET       = 0xd65f0000,
>  
> +    /* Load/store register.  Described here as 3.3.12, but the helper
> +       that emits them can transform to 3.3.10 or 3.3.13.  */
> +    I3312_STRB      = 0x38000000 | LDST_ST << 22 | MO_8 << 30,
> +    I3312_STRH      = 0x38000000 | LDST_ST << 22 | MO_16 << 30,
> +    I3312_STRW      = 0x38000000 | LDST_ST << 22 | MO_32 << 30,
> +    I3312_STRX      = 0x38000000 | LDST_ST << 22 | MO_64 << 30,
> +
> +    I3312_LDRB      = 0x38000000 | LDST_LD << 22 | MO_8 << 30,
> +    I3312_LDRH      = 0x38000000 | LDST_LD << 22 | MO_16 << 30,
> +    I3312_LDRW      = 0x38000000 | LDST_LD << 22 | MO_32 << 30,
> +    I3312_LDRX      = 0x38000000 | LDST_LD << 22 | MO_64 << 30,
> +
> +    I3312_LDRSBW    = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30,
> +    I3312_LDRSHW    = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30,
> +
> +    I3312_LDRSBX    = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30,
> +    I3312_LDRSHX    = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30,
> +    I3312_LDRSWX    = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30,
> +
> +    I3312_TO_I3310  = 0x00206800,
> +    I3312_TO_I3313  = 0x01000000,
> +
>      /* Load/store register pair instructions.  */
>      I3314_LDP       = 0x28400000,
>      I3314_STP       = 0x28000000,
> @@ -482,21 +504,25 @@ static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext,
>      tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd);
>  }
>  
> +static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
> +                              TCGReg rd, TCGReg base, TCGReg regoff)
> +{
> +    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
> +    tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | base << 5 | rd);
> +}
> +
>  
> -static void tcg_out_ldst_9(TCGContext *s, TCGMemOp size, AArch64LdstType type,
> -                           TCGReg rd, TCGReg rn, intptr_t offset)
> +static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
> +                              TCGReg rd, TCGReg rn, intptr_t offset)
>  {
> -    /* use LDUR with BASE register with 9bit signed unscaled offset */
> -    tcg_out32(s, 0x38000000 | size << 30 | type << 22
> -              | (offset & 0x1ff) << 12 | rn << 5 | rd);
> +    tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | rd);
>  }
>  
> -/* tcg_out_ldst_12 expects a scaled unsigned immediate offset */
> -static void tcg_out_ldst_12(TCGContext *s, TCGMemOp size, AArch64LdstType type,
> -                            TCGReg rd, TCGReg rn, tcg_target_ulong scaled_uimm)
> +static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
> +                              TCGReg rd, TCGReg rn, uintptr_t scaled_uimm)
>  {
> -    tcg_out32(s, 0x39000000 | size << 30 | type << 22
> -              | scaled_uimm << 10 | rn << 5 | rd);
> +    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
> +    tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | rd);
>  }
>  
>  /* Register to register move using ORR (shifted register with no shift). */
> @@ -634,35 +660,32 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
>      }
>  }
>  
> -static void tcg_out_ldst_r(TCGContext *s, TCGMemOp size, AArch64LdstType type,
> -                           TCGReg rd, TCGReg base, TCGReg regoff)
> -{
> -    tcg_out32(s, 0x38206800 | size << 30 | type << 22
> -              | regoff << 16 | base << 5 | rd);
> -}
> +/* Define something more legible for general use.  */
> +#define tcg_out_ldst_r  tcg_out_insn_3310
>  
> -/* solve the whole ldst problem */
> -static void tcg_out_ldst(TCGContext *s, TCGMemOp size, AArch64LdstType type,
> +static void tcg_out_ldst(TCGContext *s, AArch64Insn insn,
>                           TCGReg rd, TCGReg rn, intptr_t offset)
>  {
> +    TCGMemOp size = (uint32_t)insn >> 30;
> +
>      /* If the offset is naturally aligned and in range, then we can
>         use the scaled uimm12 encoding */
>      if (offset >= 0 && !(offset & ((1 << size) - 1))) {
> -        tcg_target_ulong scaled_uimm = offset >> size;
> +        uintptr_t scaled_uimm = offset >> size;
>          if (scaled_uimm <= 0xfff) {
> -            tcg_out_ldst_12(s, size, type, rd, rn, scaled_uimm);
> +            tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm);
>              return;
>          }
>      }
>  
>      if (offset >= -256 && offset < 256) {
> -        tcg_out_ldst_9(s, size, type, rd, rn, offset);
> +        tcg_out_insn_3312(s, insn, rd, rn, offset);
>          return;
>      }
>  
>      /* Worst-case scenario, move offset to temp register, use reg offset.  */
>      tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
> -    tcg_out_ldst_r(s, size, type, rd, rn, TCG_REG_TMP);
> +    tcg_out_ldst_r(s, insn, rd, rn, TCG_REG_TMP);
>  }
>  
>  static inline void tcg_out_mov(TCGContext *s,
> @@ -676,14 +699,14 @@ static inline void tcg_out_mov(TCGContext *s,
>  static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
>                                TCGReg arg1, intptr_t arg2)
>  {
> -    tcg_out_ldst(s, type == TCG_TYPE_I64 ? MO_64 : MO_32, LDST_LD,
> +    tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_LDRW : I3312_LDRX,
>                   arg, arg1, arg2);
>  }
>  
>  static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
>                                TCGReg arg1, intptr_t arg2)
>  {
> -    tcg_out_ldst(s, type == TCG_TYPE_I64 ? MO_64 : MO_32, LDST_ST,
> +    tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_STRW : I3312_STRX,
>                   arg, arg1, arg2);
>  }
>  
> @@ -1081,12 +1104,12 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
>  
>      /* Merge "low bits" from tlb offset, load the tlb comparator into X0.
>         X0 = load [X2 + (tlb_offset & 0x000fff)] */
> -    tcg_out_ldst(s, TARGET_LONG_BITS == 64 ? MO_64 : MO_32,
> -                 LDST_LD, TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff);
> +    tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX,
> +                 TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff);
>  
>      /* Load the tlb addend. Do that early to avoid stalling.
>         X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */
> -    tcg_out_ldst(s, MO_64, LDST_LD, TCG_REG_X1, TCG_REG_X2,
> +    tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2,
>                   (tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) -
>                   (is_read ? offsetof(CPUTLBEntry, addr_read)
>                    : offsetof(CPUTLBEntry, addr_write)));
> @@ -1108,43 +1131,43 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop,
>  
>      switch (memop & MO_SSIZE) {
>      case MO_UB:
> -        tcg_out_ldst_r(s, MO_8, LDST_LD, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r);
>          break;
>      case MO_SB:
> -        tcg_out_ldst_r(s, MO_8, LDST_LD_S_X, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_LDRSBX, data_r, addr_r, off_r);
>          break;
>      case MO_UW:
> -        tcg_out_ldst_r(s, MO_16, LDST_LD, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r);
>          if (bswap) {
>              tcg_out_rev16(s, TCG_TYPE_I32, data_r, data_r);
>          }
>          break;
>      case MO_SW:
>          if (bswap) {
> -            tcg_out_ldst_r(s, MO_16, LDST_LD, data_r, addr_r, off_r);
> +            tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r);
>              tcg_out_rev16(s, TCG_TYPE_I32, data_r, data_r);
>              tcg_out_sxt(s, TCG_TYPE_I64, MO_16, data_r, data_r);
>          } else {
> -            tcg_out_ldst_r(s, MO_16, LDST_LD_S_X, data_r, addr_r, off_r);
> +            tcg_out_ldst_r(s, I3312_LDRSHX, data_r, addr_r, off_r);
>          }
>          break;
>      case MO_UL:
> -        tcg_out_ldst_r(s, MO_32, LDST_LD, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r);
>          if (bswap) {
>              tcg_out_rev(s, TCG_TYPE_I32, data_r, data_r);
>          }
>          break;
>      case MO_SL:
>          if (bswap) {
> -            tcg_out_ldst_r(s, MO_32, LDST_LD, data_r, addr_r, off_r);
> +            tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r);
>              tcg_out_rev(s, TCG_TYPE_I32, data_r, data_r);
>              tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r);
>          } else {
> -            tcg_out_ldst_r(s, MO_32, LDST_LD_S_X, data_r, addr_r, off_r);
> +            tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, off_r);
>          }
>          break;
>      case MO_Q:
> -        tcg_out_ldst_r(s, MO_64, LDST_LD, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, off_r);
>          if (bswap) {
>              tcg_out_rev(s, TCG_TYPE_I64, data_r, data_r);
>          }
> @@ -1161,28 +1184,28 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop,
>  
>      switch (memop & MO_SIZE) {
>      case MO_8:
> -        tcg_out_ldst_r(s, MO_8, LDST_ST, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, off_r);
>          break;
>      case MO_16:
>          if (bswap && data_r != TCG_REG_XZR) {
>              tcg_out_rev16(s, TCG_TYPE_I32, TCG_REG_TMP, data_r);
>              data_r = TCG_REG_TMP;
>          }
> -        tcg_out_ldst_r(s, MO_16, LDST_ST, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, off_r);
>          break;
>      case MO_32:
>          if (bswap && data_r != TCG_REG_XZR) {
>              tcg_out_rev(s, TCG_TYPE_I32, TCG_REG_TMP, data_r);
>              data_r = TCG_REG_TMP;
>          }
> -        tcg_out_ldst_r(s, MO_32, LDST_ST, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, off_r);
>          break;
>      case MO_64:
>          if (bswap && data_r != TCG_REG_XZR) {
>              tcg_out_rev(s, TCG_TYPE_I64, TCG_REG_TMP, data_r);
>              data_r = TCG_REG_TMP;
>          }
> -        tcg_out_ldst_r(s, MO_64, LDST_ST, data_r, addr_r, off_r);
> +        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, off_r);
>          break;
>      default:
>          tcg_abort();
> @@ -1275,49 +1298,49 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>  
>      case INDEX_op_ld8u_i32:
>      case INDEX_op_ld8u_i64:
> -        tcg_out_ldst(s, MO_8, LDST_LD, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRB, a0, a1, a2);
>          break;
>      case INDEX_op_ld8s_i32:
> -        tcg_out_ldst(s, MO_8, LDST_LD_S_W, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2);
>          break;
>      case INDEX_op_ld8s_i64:
> -        tcg_out_ldst(s, MO_8, LDST_LD_S_X, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2);
>          break;
>      case INDEX_op_ld16u_i32:
>      case INDEX_op_ld16u_i64:
> -        tcg_out_ldst(s, MO_16, LDST_LD, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRH, a0, a1, a2);
>          break;
>      case INDEX_op_ld16s_i32:
> -        tcg_out_ldst(s, MO_16, LDST_LD_S_W, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2);
>          break;
>      case INDEX_op_ld16s_i64:
> -        tcg_out_ldst(s, MO_16, LDST_LD_S_X, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2);
>          break;
>      case INDEX_op_ld_i32:
>      case INDEX_op_ld32u_i64:
> -        tcg_out_ldst(s, MO_32, LDST_LD, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRW, a0, a1, a2);
>          break;
>      case INDEX_op_ld32s_i64:
> -        tcg_out_ldst(s, MO_32, LDST_LD_S_X, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2);
>          break;
>      case INDEX_op_ld_i64:
> -        tcg_out_ldst(s, MO_64, LDST_LD, a0, a1, a2);
> +        tcg_out_ldst(s, I3312_LDRX, a0, a1, a2);
>          break;
>  
>      case INDEX_op_st8_i32:
>      case INDEX_op_st8_i64:
> -        tcg_out_ldst(s, MO_8, LDST_ST, REG0(0), a1, a2);
> +        tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2);
>          break;
>      case INDEX_op_st16_i32:
>      case INDEX_op_st16_i64:
> -        tcg_out_ldst(s, MO_16, LDST_ST, REG0(0), a1, a2);
> +        tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2);
>          break;
>      case INDEX_op_st_i32:
>      case INDEX_op_st32_i64:
> -        tcg_out_ldst(s, MO_32, LDST_ST, REG0(0), a1, a2);
> +        tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2);
>          break;
>      case INDEX_op_st_i64:
> -        tcg_out_ldst(s, MO_64, LDST_ST, REG0(0), a1, a2);
> +        tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2);
>          break;
>  
>      case INDEX_op_add_i32:
> 


-- 
Claudio Fontana
Server Virtualization Architect
Huawei Technologies Duesseldorf GmbH
Riesstraße 25 - 80992 München

office: +49 89 158834 4135
mobile: +49 15253060158



