qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v7 35/61] target/riscv: vector widening floating-point fused


From: Alistair Francis
Subject: Re: [PATCH v7 35/61] target/riscv: vector widening floating-point fused multiply-add instructions
Date: Fri, 17 Apr 2020 15:11:11 -0700

On Mon, Mar 30, 2020 at 9:47 AM LIU Zhiwei <address@hidden> wrote:
>
> Signed-off-by: LIU Zhiwei <address@hidden>
> Reviewed-by: Richard Henderson <address@hidden>

Reviewed-by: Alistair Francis <address@hidden>

Alistair

> ---
>  target/riscv/helper.h                   | 17 +++++
>  target/riscv/insn32.decode              |  8 +++
>  target/riscv/insn_trans/trans_rvv.inc.c | 10 +++
>  target/riscv/vector_helper.c            | 91 +++++++++++++++++++++++++
>  4 files changed, 126 insertions(+)
>
> diff --git a/target/riscv/helper.h b/target/riscv/helper.h
> index 5cd1694412..edf963e787 100644
> --- a/target/riscv/helper.h
> +++ b/target/riscv/helper.h
> @@ -900,3 +900,20 @@ DEF_HELPER_6(vfmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
>  DEF_HELPER_6(vfnmsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
>  DEF_HELPER_6(vfnmsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
>  DEF_HELPER_6(vfnmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
> +
> +DEF_HELPER_6(vfwmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwnmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwnmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfwmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfwnmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfwnmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfwmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfwmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfwnmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfwnmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
> diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
> index c42bcd141c..56bfd4a919 100644
> --- a/target/riscv/insn32.decode
> +++ b/target/riscv/insn32.decode
> @@ -479,6 +479,14 @@ vfmsub_vv       101010 . ..... ..... 001 ..... 1010111 @r_vm
>  vfmsub_vf       101010 . ..... ..... 101 ..... 1010111 @r_vm
>  vfnmsub_vv      101011 . ..... ..... 001 ..... 1010111 @r_vm
>  vfnmsub_vf      101011 . ..... ..... 101 ..... 1010111 @r_vm
> +vfwmacc_vv      111100 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwmacc_vf      111100 . ..... ..... 101 ..... 1010111 @r_vm
> +vfwnmacc_vv     111101 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwnmacc_vf     111101 . ..... ..... 101 ..... 1010111 @r_vm
> +vfwmsac_vv      111110 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwmsac_vf      111110 . ..... ..... 101 ..... 1010111 @r_vm
> +vfwnmsac_vv     111111 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwnmsac_vf     111111 . ..... ..... 101 ..... 1010111 @r_vm
>
>  vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
>  vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
> diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
> index b2af9c314c..5e08c7133c 100644
> --- a/target/riscv/insn_trans/trans_rvv.inc.c
> +++ b/target/riscv/insn_trans/trans_rvv.inc.c
> @@ -2097,3 +2097,13 @@ GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
>  GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
>  GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
>  GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
> +
> +/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
> +GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
> +GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
> +GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
> +GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
> +GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index 016f49507e..171252fb69 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -3682,3 +3682,94 @@ RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64)
>  GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh)
>  GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl)
>  GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq)
> +
> +/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
> +static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
> +{
> +    return float32_muladd(float16_to_float32(a, true, s),
> +                        float16_to_float32(b, true, s), d, 0, s);
> +}
> +
> +static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
> +{
> +    return float64_muladd(float32_to_float64(a, s),
> +                        float32_to_float64(b, s), d, 0, s);
> +}
> +
> +RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16)
> +RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32)
> +GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4, clearl)
> +GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8, clearq)
> +RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16)
> +RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32)
> +GEN_VEXT_VF(vfwmacc_vf_h, 2, 4, clearl)
> +GEN_VEXT_VF(vfwmacc_vf_w, 4, 8, clearq)
> +
> +static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
> +{
> +    return float32_muladd(float16_to_float32(a, true, s),
> +                        float16_to_float32(b, true, s), d,
> +                        float_muladd_negate_c | float_muladd_negate_product, s);
> +}
> +
> +static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
> +{
> +    return float64_muladd(float32_to_float64(a, s),
> +                        float32_to_float64(b, s), d,
> +                        float_muladd_negate_c | float_muladd_negate_product, s);
> +}
> +
> +RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16)
> +RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32)
> +GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4, clearl)
> +GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8, clearq)
> +RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16)
> +RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32)
> +GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4, clearl)
> +GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8, clearq)
> +
> +static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
> +{
> +    return float32_muladd(float16_to_float32(a, true, s),
> +                        float16_to_float32(b, true, s), d,
> +                        float_muladd_negate_c, s);
> +}
> +
> +static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
> +{
> +    return float64_muladd(float32_to_float64(a, s),
> +                        float32_to_float64(b, s), d,
> +                        float_muladd_negate_c, s);
> +}
> +
> +RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16)
> +RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32)
> +GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4, clearl)
> +GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8, clearq)
> +RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16)
> +RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32)
> +GEN_VEXT_VF(vfwmsac_vf_h, 2, 4, clearl)
> +GEN_VEXT_VF(vfwmsac_vf_w, 4, 8, clearq)
> +
> +static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
> +{
> +    return float32_muladd(float16_to_float32(a, true, s),
> +                        float16_to_float32(b, true, s), d,
> +                        float_muladd_negate_product, s);
> +}
> +
> +static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
> +{
> +    return float64_muladd(float32_to_float64(a, s),
> +                        float32_to_float64(b, s), d,
> +                        float_muladd_negate_product, s);
> +}
> +
> +RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16)
> +RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32)
> +GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4, clearl)
> +GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8, clearq)
> +RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16)
> +RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32)
> +GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4, clearl)
> +GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8, clearq)
> --
> 2.23.0
>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]