[PATCH v1 09/15] tcg/riscv: Implement vector cmp ops

From: LIU Zhiwei
Subject: [PATCH v1 09/15] tcg/riscv: Implement vector cmp ops
Date: Tue, 13 Aug 2024 19:34:30 +0800
From: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>
1. Address the immediate value constraints of the RISC-V Vector Extension 1.0
for comparison instructions.
2. Extend comparison results from mask registers to SEW-width elements,
following the recommendations in the RISC-V specification, Volume I
(Version 20240411). This aligns with TCG's cmp_vec behavior by expanding
compare results to full element width: all 1s for true, all 0s for false
(a scalar sketch of this behavior follows below).
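For reference, a minimal scalar sketch of the cmp_vec semantics targeted here
(illustrative only: the function name, the uint32_t element type and the fixed
SEW of 32 bits are assumptions, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    /* Scalar model of cmp_vec for TCG_COND_EQ at SEW = 32: each destination
     * element becomes all 1s when the compare is true, all 0s otherwise. */
    static void cmp_eq_expand_u32(uint32_t *d, const uint32_t *a,
                                  const uint32_t *b, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            d[i] = (a[i] == b[i]) ? UINT32_MAX : 0;
        }
    }

The RVV compare instructions produce only one mask bit per element, so the
backend obtains the same result by comparing into the mask register V0,
zeroing the destination, and then merging -1 into the masked elements with
vmerge.vim.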
Signed-off-by: TANG Tiancheng <tangtiancheng.ttc@alibaba-inc.com>
Reviewed-by: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
---
tcg/riscv/tcg-target-con-set.h | 2 +
tcg/riscv/tcg-target-con-str.h | 1 +
tcg/riscv/tcg-target.c.inc | 188 +++++++++++++++++++++++++++++++++
tcg/riscv/tcg-target.opc.h | 3 +
4 files changed, 194 insertions(+)
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index 8a0de18257..23b391dd07 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -22,5 +22,7 @@ C_N1_I2(r, r, rM)
C_O1_I4(r, r, rI, rM, rM)
C_O2_I4(r, r, rZ, rZ, rM, rM)
C_O0_I2(v, r)
+C_O0_I2(v, vK)
C_O1_I1(v, r)
C_O1_I2(v, v, v)
+C_O1_I2(v, v, vK)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index b2b3211bcb..0aaad7b753 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -17,6 +17,7 @@ REGS('v', ALL_VECTOR_REGS)
*/
CONST('I', TCG_CT_CONST_S12)
CONST('J', TCG_CT_CONST_J12)
+CONST('K', TCG_CT_CONST_S5)
CONST('N', TCG_CT_CONST_N12)
CONST('M', TCG_CT_CONST_M12)
CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 650b5eff1a..3f1e215e90 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -113,6 +113,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_N12 0x400
#define TCG_CT_CONST_M12 0x800
#define TCG_CT_CONST_J12 0x1000
+#define TCG_CT_CONST_S5 0x2000
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(33, 31)
@@ -160,6 +161,13 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
return 1;
}
+ /*
+ * Sign extended from 5 bits: [-0x10, 0x0f].
+ * Used for vector-immediate.
+ */
+ if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) {
+ return 1;
+ }
return 0;
}
@@ -289,12 +297,39 @@ typedef enum {
OPC_VSE32_V = 0x6027 | V_SUMOP,
OPC_VSE64_V = 0x7027 | V_SUMOP,
+ OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI,
+ OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV,
+ OPC_VMNAND_MM = 0x74000057 | V_OPMVV,
+
OPC_VADD_VV = 0x57 | V_OPIVV,
OPC_VSUB_VV = 0x8000057 | V_OPIVV,
OPC_VAND_VV = 0x24000057 | V_OPIVV,
OPC_VOR_VV = 0x28000057 | V_OPIVV,
OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
+ OPC_VMSEQ_VV = 0x60000057 | V_OPIVV,
+ OPC_VMSEQ_VI = 0x60000057 | V_OPIVI,
+ OPC_VMSEQ_VX = 0x60000057 | V_OPIVX,
+ OPC_VMSNE_VV = 0x64000057 | V_OPIVV,
+ OPC_VMSNE_VI = 0x64000057 | V_OPIVI,
+ OPC_VMSNE_VX = 0x64000057 | V_OPIVX,
+
+ OPC_VMSLTU_VV = 0x68000057 | V_OPIVV,
+ OPC_VMSLTU_VX = 0x68000057 | V_OPIVX,
+ OPC_VMSLT_VV = 0x6c000057 | V_OPIVV,
+ OPC_VMSLT_VX = 0x6c000057 | V_OPIVX,
+ OPC_VMSLEU_VV = 0x70000057 | V_OPIVV,
+ OPC_VMSLEU_VX = 0x70000057 | V_OPIVX,
+ OPC_VMSLE_VV = 0x74000057 | V_OPIVV,
+ OPC_VMSLE_VX = 0x74000057 | V_OPIVX,
+
+ OPC_VMSLEU_VI = 0x70000057 | V_OPIVI,
+ OPC_VMSLE_VI = 0x74000057 | V_OPIVI,
+ OPC_VMSGTU_VI = 0x78000057 | V_OPIVI,
+ OPC_VMSGTU_VX = 0x78000057 | V_OPIVX,
+ OPC_VMSGT_VI = 0x7c000057 | V_OPIVI,
+ OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,
+
OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
OPC_VMV_V_X = 0x5e000057 | V_OPIVX,
@@ -575,6 +610,15 @@ static void tcg_out_opc_vec_config(TCGContext *s, RISCVInsn opc,
#define tcg_out_opc_vi(s, opc, vd, vs2, imm, vm) \
tcg_out_opc_reg_vec_i(s, opc, vd, imm, vs2, vm);
+#define tcg_out_opc_vim_mask(s, opc, vd, vs2, imm) \
+ tcg_out_opc_reg_vec_i(s, opc, vd, imm, vs2, false);
+
+#define tcg_out_opc_vvm_mask(s, opc, vd, vs2, vs1) \
+ tcg_out_opc_reg_vec(s, opc, vd, vs1, vs2, false);
+
+#define tcg_out_opc_mvv(s, opc, vd, vs2, vs1, vm) \
+ tcg_out_opc_reg_vec(s, opc, vd, vs1, vs2, vm);
+
#define tcg_out_opc_vconfig(s, opc, rd, avl, vtypei) \
tcg_out_opc_vec_config(s, opc, rd, avl, vtypei);
@@ -1037,6 +1081,22 @@ static const struct {
[TCG_COND_GTU] = { OPC_BLTU, true }
};
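+/*
+ * RVV has no vmsgt(u).vv or vmsge(u).vv encodings, so GT(U)/GE(U) are
+ * emitted as LT(U)/LE(U) with the source operands swapped ('swap' flag).
+ */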
+static const struct {
+ RISCVInsn opc;
+ bool swap;
+} tcg_cmpcond_to_rvv_vv[] = {
+ [TCG_COND_EQ] = { OPC_VMSEQ_VV, false },
+ [TCG_COND_NE] = { OPC_VMSNE_VV, false },
+ [TCG_COND_LT] = { OPC_VMSLT_VV, false },
+ [TCG_COND_GE] = { OPC_VMSLE_VV, true },
+ [TCG_COND_GT] = { OPC_VMSLT_VV, true },
+ [TCG_COND_LE] = { OPC_VMSLE_VV, false },
+ [TCG_COND_LTU] = { OPC_VMSLTU_VV, false },
+ [TCG_COND_GEU] = { OPC_VMSLEU_VV, true },
+ [TCG_COND_GTU] = { OPC_VMSLTU_VV, true },
+ [TCG_COND_LEU] = { OPC_VMSLEU_VV, false }
+};
+
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
TCGReg arg2, TCGLabel *l)
{
@@ -1054,6 +1114,79 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_opc_branch(s, op, arg1, arg2, 0);
}
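+/*
+ * RVV has no vmsge(u).vx encoding; GE(U) with a register operand is
+ * synthesized as NOT(LT(U)) by inverting the mask with vmnand.mm
+ * ('expand' flag).
+ */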
+static const struct {
+ RISCVInsn op;
+ bool expand;
+} tcg_cmpcond_to_rvv_vx[] = {
+ [TCG_COND_EQ] = { OPC_VMSEQ_VX, false },
+ [TCG_COND_NE] = { OPC_VMSNE_VX, false },
+ [TCG_COND_GT] = { OPC_VMSGT_VX, false },
+ [TCG_COND_LE] = { OPC_VMSLE_VX, false },
+ [TCG_COND_LT] = { OPC_VMSLT_VX, false },
+ [TCG_COND_LTU] = { OPC_VMSLTU_VX, false },
+ [TCG_COND_GTU] = { OPC_VMSGTU_VX, false },
+ [TCG_COND_LEU] = { OPC_VMSLEU_VX, false },
+ [TCG_COND_GE] = { OPC_VMSLT_VX, true },
+ [TCG_COND_GEU] = { OPC_VMSLTU_VX, true },
+};
+
+static void tcg_out_cmp_vec_vx(TCGContext *s, TCGCond cond, TCGReg arg1,
+ tcg_target_long arg2)
+{
+ RISCVInsn op;
+
+ tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_cmpcond_to_rvv_vx));
+ op = tcg_cmpcond_to_rvv_vx[cond].op;
+ tcg_debug_assert(op != 0);
+
+ tcg_out_opc_vx(s, op, TCG_REG_V0, arg1, arg2, true);
+ if (tcg_cmpcond_to_rvv_vx[cond].expand) {
+ tcg_out_opc_mvv(s, OPC_VMNAND_MM, TCG_REG_V0, TCG_REG_V0,
+ TCG_REG_V0, false);
+ }
+}
+
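+/*
+ * RVV provides VI forms only for EQ/NE/GT(U)/LE(U). LT(U) and GE(U) are
+ * rewritten as LE(U)/GT(U) with the immediate decremented by one, hence
+ * the shifted immediate ranges and the 'adjust' flag.
+ */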
+static const struct {
+ RISCVInsn op;
+ int min;
+ int max;
+ bool adjust;
+} tcg_cmpcond_to_rvv_vi[] = {
+ [TCG_COND_EQ] = { OPC_VMSEQ_VI, -16, 15, false },
+ [TCG_COND_NE] = { OPC_VMSNE_VI, -16, 15, false },
+ [TCG_COND_GT] = { OPC_VMSGT_VI, -16, 15, false },
+ [TCG_COND_LE] = { OPC_VMSLE_VI, -16, 15, false },
+ [TCG_COND_LT] = { OPC_VMSLE_VI, -15, 16, true },
+ [TCG_COND_GE] = { OPC_VMSGT_VI, -15, 16, true },
+ [TCG_COND_LEU] = { OPC_VMSLEU_VI, 0, 15, false },
+ [TCG_COND_GTU] = { OPC_VMSGTU_VI, 0, 15, false },
+ [TCG_COND_LTU] = { OPC_VMSLEU_VI, 1, 16, true },
+ [TCG_COND_GEU] = { OPC_VMSGTU_VI, 1, 16, true },
+};
+
+static void tcg_out_cmp_vec_vi(TCGContext *s, TCGCond cond, TCGReg arg1,
+ tcg_target_long arg2)
+{
+ RISCVInsn op;
+ signed imm_min, imm_max;
+
+ tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_cmpcond_to_rvv_vi));
+ op = tcg_cmpcond_to_rvv_vi[cond].op;
+ tcg_debug_assert(op != 0);
+ imm_min = tcg_cmpcond_to_rvv_vi[cond].min;
+ imm_max = tcg_cmpcond_to_rvv_vi[cond].max;
+
+ if (arg2 >= imm_min && arg2 <= imm_max) {
+ if (tcg_cmpcond_to_rvv_vi[cond].adjust) {
+ arg2 -= 1;
+ }
+ tcg_out_opc_vi(s, op, TCG_REG_V0, arg1, arg2, true);
+ } else {
+ tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, arg2);
+ tcg_out_cmp_vec_vx(s, cond, arg1, TCG_REG_TMP0);
+ }
+}
+
#define SETCOND_INV TCG_TARGET_NB_REGS
#define SETCOND_NEZ (SETCOND_INV << 1)
#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
@@ -2179,6 +2312,33 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_xor_vec:
tcg_out_opc_vv(s, OPC_VXOR_VV, a0, a1, a2, true);
break;
+ case INDEX_op_rvv_cmpcond_vec:
+ {
+ RISCVInsn op;
+ if (const_args[1]) {
+ tcg_out_cmp_vec_vi(s, a2, a0, a1);
+ } else {
+ op = tcg_cmpcond_to_rvv_vv[a2].opc;
+ tcg_debug_assert(op != 0);
+
+ if (tcg_cmpcond_to_rvv_vv[a2].swap) {
+ TCGReg t = a0;
+ a0 = a1;
+ a1 = t;
+ }
+ tcg_out_opc_vv(s, op, TCG_REG_V0, a0, a1, true);
+ }
+ }
+ break;
+ case INDEX_op_rvv_merge_vec:
+ if (const_args[2]) {
+ /* vd[i] = v0.mask[i] ? imm : vs2[i] */
+ tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, a0, a1, a2);
+ } else {
+ /* vd[i] = v0.mask[i] ? vs1[i] : vs2[i] */
+ tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, a0, a1, a2);
+ }
+ break;
case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
default:
@@ -2189,10 +2349,31 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg a0, ...)
{
+ va_list va;
+ TCGv_vec v0, v1;
+ TCGArg a2, a3;
+
+ va_start(va, a0);
+ v0 = temp_tcgv_vec(arg_temp(a0));
+ v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+ a2 = va_arg(va, TCGArg);
+
switch (opc) {
+ case INDEX_op_cmp_vec:
+ {
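+ /*
+  * Emit the compare into mask register V0, zero the destination, then
+  * merge -1 into every element whose mask bit is set.
+  */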
+ a3 = va_arg(va, TCGArg);
+ vec_gen_3(INDEX_op_rvv_cmpcond_vec, type, vece,
+ tcgv_vec_arg(v1), a2, a3);
+ tcg_gen_mov_vec(v0, tcg_constant_vec_matching(v0, vece, 0));
+ vec_gen_3(INDEX_op_rvv_merge_vec, type, vece,
+ tcgv_vec_arg(v0), tcgv_vec_arg(v0),
+ tcgv_i64_arg(tcg_constant_i64(-1)));
+ }
+ break;
default:
g_assert_not_reached();
}
+ va_end(va);
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
@@ -2204,6 +2385,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_or_vec:
case INDEX_op_xor_vec:
return 1;
+ case INDEX_op_cmp_vec:
+ return -1;
default:
return 0;
}
@@ -2360,6 +2543,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_or_vec:
case INDEX_op_xor_vec:
return C_O1_I2(v, v, v);
+ case INDEX_op_cmp_vec:
+ case INDEX_op_rvv_merge_vec:
+ return C_O1_I2(v, v, vK);
+ case INDEX_op_rvv_cmpcond_vec:
+ return C_O0_I2(v, vK);
default:
g_assert_not_reached();
}
diff --git a/tcg/riscv/tcg-target.opc.h b/tcg/riscv/tcg-target.opc.h
index b80b39e1e5..2f23453c35 100644
--- a/tcg/riscv/tcg-target.opc.h
+++ b/tcg/riscv/tcg-target.opc.h
@@ -10,3 +10,6 @@
* emitted by tcg_expand_vec_op. For those familiar with GCC internals,
* consider these to be UNSPEC with names.
*/
+
+DEF(rvv_cmpcond_vec, 0, 2, 1, IMPLVEC)
+DEF(rvv_merge_vec, 1, 2, 0, IMPLVEC)
--
2.43.0