From: Peter Maydell
Subject: Re: [Qemu-arm] [Qemu-devel] [PATCH 6/9] target/arm: Introduce ARM_FEATURE_V8_ATOMICS and initial decode
Date: Thu, 3 May 2018 14:59:49 +0100

On 27 April 2018 at 01:26, Richard Henderson
<address@hidden> wrote:
> The insns in the ARMv8.1-Atomics extension are added to the existing
> load/store exclusive and load/store reg opcode spaces.
> Rearrange the top-level decoders for these to accommodate.
> The Atomics insns themselves still generate Unallocated.
>
> Signed-off-by: Richard Henderson <address@hidden>
> ---
> +    case 0x4: /* LDXR */
> +    case 0x5: /* LDAXR */
> +        if (rn == 31) {
> +            gen_check_sp_alignment(s);
>          }
> -    } else {
> -        TCGv_i64 tcg_rt = cpu_reg(s, rt);
> -        bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);
> +        tcg_addr = read_cpu_reg_sp(s, rn, 1);
> +        s->is_ldex = true;
> +        gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
> +        if (is_lasr) {
> +            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
> +        }
> +        return;
>
> +    case 0x9: /* STLR */
>          /* Generate ISS for non-exclusive accesses including LASR.  */
> -        if (is_store) {
> +        if (rn == 31) {
> +            gen_check_sp_alignment(s);
> +        }
> +        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
> +        tcg_addr = read_cpu_reg_sp(s, rn, 1);
> +        do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
> +                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
> +        return;
> +
> +    case 0xd: /* LDARB */

should this be /* LDAR */, to match the lack of size suffixes on the
other comments in this switch?

> +        /* Generate ISS for non-exclusive accesses including LASR.  */
> +        if (rn == 31) {
> +            gen_check_sp_alignment(s);
> +        }
> +        tcg_addr = read_cpu_reg_sp(s, rn, 1);
> +        do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
> +                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
> +        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
> +        return;
> +
> +    case 0x2: case 0x3: /* CASP / STXP */
> +        if (size & 2) { /* STXP / STLXP */
> +            if (rn == 31) {
> +                gen_check_sp_alignment(s);
> +            }
>              if (is_lasr) {
>                  tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
>              }
> -            do_gpr_st(s, tcg_rt, tcg_addr, size,
> -                      true, rt, iss_sf, is_lasr);
> -        } else {
> -            do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
> -                      true, rt, iss_sf, is_lasr);
> +            tcg_addr = read_cpu_reg_sp(s, rn, 1);
> +            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
> +            return;
> +        }
> +        /* CASP / CASPL */
> +        break;
> +
> +    case 0x6: case 0x7: /* CASP / LDXP */
> +        if (size & 2) { /* LDXP / LDAXP */
> +            if (rn == 31) {
> +                gen_check_sp_alignment(s);
> +            }
> +            tcg_addr = read_cpu_reg_sp(s, rn, 1);
> +            s->is_ldex = true;
> +            gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
>              if (is_lasr) {
>                  tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
>              }
> +            return;
>          }
> +        /* CASPA / CASPAL */
> +        break;
> +
> +    case 0xa: /* CAS */
> +    case 0xb: /* CASL */
> +    case 0xe: /* CASA */
> +    case 0xf: /* CASAL */
> +        break;
>      }
> +    unallocated_encoding(s);
>  }
>
>  /*
> @@ -2715,6 +2751,55 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
>      }
>  }
>
> +/* Atomic memory operations
> + *
> + *  31  30      27  26    24    22  21   16   15    12    10    5     0
> + * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
> + * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
> + * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
> + *
> + * Rt: the result register
> + * Rn: base address or SP
> + * Rs: the source register for the operation
> + * V: vector flag (always 0 as of v8.3)
> + * A: acquire flag
> + * R: release flag
> + */
> +static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
> +                              int size, int rt, bool is_vector)
> +{
> +    int rs = extract32(insn, 16, 5);
> +    int rn = extract32(insn, 5, 5);
> +    int o3_opc = extract32(insn, 12, 4);
> +    int feature = ARM_FEATURE_V8_ATOMICS;
> +
> +    if (is_vector) {
> +        unallocated_encoding(s);
> +        return;
> +    }
> +    switch (o3_opc) {
> +    case 000: /* LDADD */
> +    case 001: /* LDCLR */
> +    case 002: /* LDEOR */
> +    case 003: /* LDSET */
> +    case 004: /* LDSMAX */
> +    case 005: /* LDSMIN */
> +    case 006: /* LDUMAX */
> +    case 007: /* LDUMIN */
> +    case 010: /* SWP */

I can see why you've used octal constants here, but I think they're
probably going to be more confusing than helpful since they're
so rare in our codebase.

What about LDAPRB, LDAPRH and LDAPR (which are o3_opc == 0b1100)?
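
Something along these lines would read more naturally to me -- hex
constants, with a slot reserved for the LDAPR group. Just a sketch on
my part, untested; LDAPR would presumably want its own feature gate,
since it isn't part of the Atomics extension:

    switch (o3_opc) {
    case 0x0: /* LDADD */
    case 0x1: /* LDCLR */
    case 0x2: /* LDEOR */
    case 0x3: /* LDSET */
    case 0x4: /* LDSMAX */
    case 0x5: /* LDSMIN */
    case 0x6: /* LDUMAX */
    case 0x7: /* LDUMIN */
    case 0x8: /* SWP */
        break;
    case 0xc: /* LDAPRB, LDAPRH, LDAPR: separate extension, separate gate */
        break;
    default:
        unallocated_encoding(s);
        return;
    }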

> +        break;
> +    default:
> +        unallocated_encoding(s);
> +        return;
> +    }
> +    if (!arm_dc_feature(s, feature)) {
> +        unallocated_encoding(s);
> +        return;
> +    }
> +
> +    (void)rs;
> +    (void)rn;
> +}
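
(As an aside, I assume a later patch in the series replaces the (void)
casts with the actual operations, presumably something of this shape
using the generic TCG atomic helpers -- a rough sketch of where this is
heading, not something I've compiled:)

    /* Hypothetical follow-up, not part of this patch: do the
     * fetch-and-op, returning the old memory value in Rt.
     */
    TCGv_i64 tcg_rs = read_cpu_reg(s, rs, 1);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    TCGv_i64 tcg_addr;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    switch (o3_opc) {
    case 000: /* LDADD */
        tcg_gen_atomic_fetch_add_i64(tcg_rt, tcg_addr, tcg_rs,
                                     get_mem_index(s),
                                     size | MO_ALIGN | s->be_data);
        break;
    /* ...and similarly for the other operations... */
    }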

Otherwise
Reviewed-by: Peter Maydell <address@hidden>

thanks
-- PMM


