[RFC PATCH v2 58/67] Hexagon HVX import macro definitions
From: Taylor Simpson
Subject: [RFC PATCH v2 58/67] Hexagon HVX import macro definitions
Date: Fri, 28 Feb 2020 10:43:54 -0600
Imported from the Hexagon architecture library:
    imported/allext_macros.def    Top level macro include for all extensions
    imported/mmvec/macros.def     HVX macro definitions
The macro definition files specify instruction attributes that are applied
to each instruction that references the macro.
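As a purely illustrative sketch (the struct and table names below are
hypothetical, not the actual QEMU generator code), a build step can reduce
each DEF_MACRO row to a macro-name/attribute pair and give an instruction
the attributes of every macro its semantics reference:

    /* Hedged sketch, not the actual QEMU generator: DEF_MACRO is narrowed
     * to two fields here, whereas the real rows carry the name, parameter
     * list, short/long descriptions, behavior, and attributes. */
    #include <stdio.h>
    #include <string.h>

    typedef struct {
        const char *name;     /* macro name, e.g. "fSCATTER_INIT" */
        const char *attribs;  /* attributes applied to referencing insns */
    } macro_attrib_t;

    #define DEF_MACRO(NAME, ATTRIBS) { #NAME, #ATTRIBS },
    static const macro_attrib_t table[] = {
        DEF_MACRO(fSCATTER_INIT,
                  (A_STORE, A_MEMLIKE, A_RESTRICT_SINGLE_MEM_FIRST,
                   A_RESTRICT_SLOT0ONLY))
        DEF_MACRO(fGATHER_INIT,
                  (A_LOAD, A_MEMLIKE, A_RESTRICT_SINGLE_MEM_FIRST,
                   A_RESTRICT_SLOT1ONLY))
    };
    #undef DEF_MACRO

    int main(void)
    {
        /* An instruction inherits the attributes of every macro that
         * its semantics string references. */
        const char *semantics = "fSCATTER_INIT(RtV,RsV,2)";
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (strstr(semantics, table[i].name)) {
                printf("%s -> %s\n", table[i].name, table[i].attribs);
            }
        }
        return 0;
    }

The sketch only shows why a semantics definition that references, e.g.,
fSCATTER_INIT is thereby marked as a slot-0 memory store.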
Signed-off-by: Taylor Simpson <address@hidden>
---
target/hexagon/imported/allext_macros.def | 25 +
target/hexagon/imported/mmvec/macros.def | 1110 +++++++++++++++++++++++++++++
2 files changed, 1135 insertions(+)
create mode 100644 target/hexagon/imported/allext_macros.def
create mode 100755 target/hexagon/imported/mmvec/macros.def
diff --git a/target/hexagon/imported/allext_macros.def b/target/hexagon/imported/allext_macros.def
new file mode 100644
index 0000000..e1f16eb
--- /dev/null
+++ b/target/hexagon/imported/allext_macros.def
@@ -0,0 +1,25 @@
+/*
+ * Copyright(c) 2019-2020 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Top level file for all instruction set extensions
+ */
+#define EXTNAME mmvec
+#define EXTSTR "mmvec"
+#include "mmvec/macros.def"
+#undef EXTNAME
+#undef EXTSTR
diff --git a/target/hexagon/imported/mmvec/macros.def b/target/hexagon/imported/mmvec/macros.def
new file mode 100755
index 0000000..fa0beb1
--- /dev/null
+++ b/target/hexagon/imported/mmvec/macros.def
@@ -0,0 +1,1110 @@
+/*
+ * Copyright(c) 2019-2020 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+DEF_MACRO(fDUMPQ,(STR,REG),
+ "dump REG",
+ "dump REG",
+ do {
+ printf(STR ":" #REG ": 0x%016llx\n",REG.ud[0]);
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fUSE_LOOKUP_ADDRESS_BY_REV,(PROC),
+ "",
+ "Use full VA address for lookup and exception based on REV ",
+ PROC->arch_proc_options->mmvec_use_full_va_for_lookup,
+ ()
+)
+
+DEF_MACRO(fUSE_LOOKUP_ADDRESS,(),
+ "",
+ "Use full VA address for lookup and exception",
+ 1,
+ ()
+)
+
+DEF_MACRO(fRT8NOTE, (),
+ "",
+ "",
+ ,
+ (A_NOTE_RT8)
+)
+
+DEF_MACRO(fNOTQ,(VAL),
+ "~VAL",
+ "~VAL",
+ /* Will break Visual Studio? */
+    ({mmqreg_t _ret = {0}; int _i_; for (_i_ = 0; _i_ < fVECSIZE()/64; _i_++) _ret.ud[_i_] = ~VAL.ud[_i_]; _ret;}),
+ ()
+)
+
+DEF_MACRO(fGETQBITS,(REG,WIDTH,MASK,BITNO),
+ "REG[BITNO+WIDTH-1:BITNO]",
+ "Get MASK bits at BITNO from REG",
+ ((MASK) & (REG.w[(BITNO)>>5] >> ((BITNO) & 0x1f))),
+ ()
+)
+
+DEF_MACRO(fGETQBIT,(REG,BITNO),
+ "REG[BITNO]",
+ "Get bit BITNO from REG",
+ fGETQBITS(REG,1,1,BITNO),
+ ()
+)
+
+DEF_MACRO(fGENMASKW,(QREG,IDX),
+ "maskw(QREG,IDX)",
+ "Generate mask from QREG for word IDX",
+ (((fGETQBIT(QREG,(IDX*4+0)) ? 0xFF : 0x0) << 0)
+ |((fGETQBIT(QREG,(IDX*4+1)) ? 0xFF : 0x0) << 8)
+ |((fGETQBIT(QREG,(IDX*4+2)) ? 0xFF : 0x0) << 16)
+ |((fGETQBIT(QREG,(IDX*4+3)) ? 0xFF : 0x0) << 24)),
+ ()
+)
+DEF_MACRO(fGET10BIT,(COE,VAL,POS),
+    "COE=(((((fGETUBYTE(3,VAL) >> (2 * POS)) & 3) << 8) | fGETUBYTE(POS,VAL)) << 6) >> 6;",
+ "Get 10-bit coefficient from current word value and byte position",
+ {
+        COE = (((((fGETUBYTE(3,VAL) >> (2 * POS)) & 3) << 8) | fGETUBYTE(POS,VAL)) << 6);
+ COE >>= 6;
+ },
+ ()
+)
+
+DEF_MACRO(fVMAX,(X,Y),
+ "max(X,Y)",
+ "",
+ (X>Y) ? X : Y,
+ ()
+)
+
+
+DEF_MACRO(fREAD_VEC,
+ (DST,IDX),
+ "DST=VREG[IDX]", /* short desc */
+ "Read Vector IDX", /* long desc */
+ (DST = READ_VREG(fMODCIRCU((IDX),5))),
+ ()
+)
+
+DEF_MACRO(fGETNIBBLE,(IDX,SRC),
+ "SRC.s4[IDX]",
+ "Get nibble",
+ ( fSXTN(4,8,(SRC >> (4*IDX)) & 0xF) ),
+ ()
+)
+
+DEF_MACRO(fGETCRUMB,(IDX,SRC),
+ "SRC.s2[IDX]",
+ "Get 2bits",
+ ( fSXTN(2,8,(SRC >> (2*IDX)) & 0x3) ),
+ ()
+)
+
+DEF_MACRO(fGETCRUMB_SYMMETRIC,(IDX,SRC),
+ "SRC.s2[IDX] >= 0 ? (2-SRC.s2[IDX]) : SRC.s2[IDX]",
+ "Get 2bits",
+ ( (fGETCRUMB(IDX,SRC)>=0 ? (2-fGETCRUMB(IDX,SRC)) : fGETCRUMB(IDX,SRC) ) ),
+ ()
+)
+
+#define ZERO_OFFSET_2B +
+
+DEF_MACRO(fWRITE_VEC,
+ (IDX,VAR),
+ "VREG[IDX]=VAR", /* short desc */
+ "Write Vector IDX", /* long desc */
+ (WRITE_VREG(fMODCIRCU((IDX),5),VAR)),
+ ()
+)
+
+DEF_MACRO(fGENMASKH,(QREG,IDX),
+ "maskh(QREG,IDX)",
+ "generate mask from QREG for halfword IDX",
+ (((fGETQBIT(QREG,(IDX*2+0)) ? 0xFF : 0x0) << 0)
+ |((fGETQBIT(QREG,(IDX*2+1)) ? 0xFF : 0x0) << 8)),
+ ()
+)
+
+DEF_MACRO(fGETMASKW,(VREG,QREG,IDX),
+ "VREG.w[IDX] & fGENMASKW(QREG,IDX)",
+ "Mask word IDX from VREG using QREG",
+ (VREG.w[IDX] & fGENMASKW((QREG),IDX)),
+ ()
+)
+
+DEF_MACRO(fGETMASKH,(VREG,QREG,IDX),
+ "VREG.h[IDX] & fGENMASKH(QREG,IDX)",
+    "Mask halfword IDX from VREG using QREG",
+ (VREG.h[IDX] & fGENMASKH((QREG),IDX)),
+ ()
+)
+
+DEF_MACRO(fCONDMASK8,(QREG,IDX,YESVAL,NOVAL),
+ "QREG.IDX ? YESVAL : NOVAL",
+ "QREG.IDX ? YESVAL : NOVAL",
+ (fGETQBIT(QREG,IDX) ? (YESVAL) : (NOVAL)),
+ ()
+)
+
+DEF_MACRO(fCONDMASK16,(QREG,IDX,YESVAL,NOVAL),
+ "select_bytes(QREG,IDX,YESVAL,NOVAL)",
+ "select_bytes(QREG,IDX,YESVAL,NOVAL)",
+    ((fGENMASKH(QREG,IDX) & (YESVAL)) | (fGENMASKH(fNOTQ(QREG),IDX) & (NOVAL))),
+ ()
+)
+
+DEF_MACRO(fCONDMASK32,(QREG,IDX,YESVAL,NOVAL),
+ "select_bytes(QREG,IDX,YESVAL,NOVAL)",
+ "select_bytes(QREG,IDX,YESVAL,NOVAL)",
+    ((fGENMASKW(QREG,IDX) & (YESVAL)) | (fGENMASKW(fNOTQ(QREG),IDX) & (NOVAL))),
+ ()
+)
+
+
+DEF_MACRO(fSETQBITS,(REG,WIDTH,MASK,BITNO,VAL),
+ "REG[BITNO+WIDTH-1:BITNO] = VAL",
+ "Put bits into REG",
+ do {
+ size4u_t __TMP = (VAL);
+ REG.w[(BITNO)>>5] &= ~((MASK) << ((BITNO) & 0x1f));
+ REG.w[(BITNO)>>5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f));
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fSETQBIT,(REG,BITNO,VAL),
+ "REG[BITNO]=VAL",
+ "Put bit into REG",
+ fSETQBITS(REG,1,1,BITNO,VAL),
+ ()
+)
+
+DEF_MACRO(fVBYTES,(),
+ "VWIDTH",
+ "Number of bytes in a vector",
+ (fVECSIZE()),
+ ()
+)
+
+DEF_MACRO(fVHALVES,(),
+ "VWIDTH/2",
+ "Number of halves in a vector",
+ (fVECSIZE()/2),
+ ()
+)
+
+DEF_MACRO(fVWORDS,(),
+    "VWIDTH/4",
+ "Number of words in a vector",
+ (fVECSIZE()/4),
+ ()
+)
+
+DEF_MACRO(fVDWORDS,(),
+ "VWIDTH/8",
+ "Number of double words in a vector",
+ (fVECSIZE()/8),
+ ()
+)
+
+DEF_MACRO(fVALIGN, (ADDR, LOG2_ALIGNMENT),
+ "ADDR = ADDR & ~(LOG2_ALIGNMENT-1)",
+ "Align to Element Size",
+ ( ADDR = ADDR & ~(LOG2_ALIGNMENT-1)),
+ ()
+)
+
+DEF_MACRO(fVLASTBYTE, (ADDR, LOG2_ALIGNMENT),
+ "ADDR = ADDR | (LOG2_ALIGNMENT-1)",
+ "Set LSB of length to last byte",
+ ( ADDR = ADDR | (LOG2_ALIGNMENT-1)),
+ ()
+)
+
+
+DEF_MACRO(fVELEM, (WIDTH),
+ "VBITS/WIDTH",
+ "Number of WIDTH-bit elements in a vector",
+ ((fVECSIZE()*8)/WIDTH),
+ ()
+)
+
+DEF_MACRO(fVECLOGSIZE,(),
+ "log2(VECTOR_SIZE)",
+ "Log base 2 of the number of bytes in a vector",
+ (mmvec_current_veclogsize(thread)),
+ ()
+)
+
+DEF_MACRO(fVBUF_IDX,(EA),
+ "(EA >> log2(VECTOR_SIZE)) & 0xFF",
+ "(EA >> log2(VECTOR_SIZE)) & 0xFF",
+ (((EA) >> fVECLOGSIZE()) & 0xFF),
+ (A_FAKEINSN)
+)
+
+DEF_MACRO(fREAD_VBUF,(IDX,WIDX),
+ "vbuf[IDX].w[WIDX]",
+ "vbuf[IDX].w[WIDX]",
+ READ_VBUF(IDX,WIDX),
+ (A_FAKEINSN)
+)
+
+DEF_MACRO(fLOG_VBUF,(IDX,VAL,WIDX),
+ "vbuf[IDX].w[WIDX] = VAL",
+ "vbuf[IDX].w[WIDX] = VAL",
+ LOG_VBUF(IDX,VAL,WIDX),
+ (A_FAKEINSN)
+)
+
+DEF_MACRO(fVECSIZE,(),
+ "VBYTES",
+ "Number of bytes in a vector currently",
+ (1<<fVECLOGSIZE()),
+ ()
+)
+
+DEF_MACRO(fSWAPB,(A, B),
+ "SWAP(A,B)",
+ "Swap bytes",
+ {
+ size1u_t tmp = A;
+ A = B;
+ B = tmp;
+ },
+ /* NOTHING */
+)
+
+DEF_MACRO(
+ fVZERO,(),
+ "0",
+ "0",
+ mmvec_zero_vector(),
+ ()
+)
+
+DEF_MACRO(
+ fNEWVREG,(VNUM),
+ "VNUM.new",
+ "Register value produced in this packet",
+    ((THREAD2STRUCT->VRegs_updated & (((VRegMask)1)<<VNUM)) ? THREAD2STRUCT->future_VRegs[VNUM] : mmvec_zero_vector()),
+ (A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY)
+)
+
+DEF_MACRO(
+ fV_AL_CHECK,
+ (EA,MASK),
+ "",
+ "",
+ if ((EA) & (MASK)) {
+        warn("aligning misaligned vector. PC=%08x EA=%08x",thread->Regs[REG_PC],(EA));
+ },
+ ()
+)
+DEF_MACRO(fSCATTER_INIT, ( REGION_START, LENGTH, ELEMENT_SIZE),
+ "",
+ "",
+ {
+        mem_vector_scatter_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE);
+ if (EXCEPTION_DETECTED) return;
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST,A_RESTRICT_SLOT0ONLY)
+)
+
+DEF_MACRO(fGATHER_INIT, ( REGION_START, LENGTH, ELEMENT_SIZE),
+ "",
+ "",
+ {
+ mem_vector_gather_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE);
+ if (EXCEPTION_DETECTED) return;
+ },
+ (A_LOAD,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST,A_RESTRICT_SLOT1ONLY)
+)
+
+DEF_MACRO(fSCATTER_FINISH, (OP),
+ "",
+ "",
+ {
+ if (EXCEPTION_DETECTED) return;
+ mem_vector_scatter_finish(thread, insn, OP);
+ },
+ ()
+)
+
+DEF_MACRO(fGATHER_FINISH, (),
+ "",
+ "",
+ {
+ if (EXCEPTION_DETECTED) return;
+ mem_vector_gather_finish(thread, insn);
+ },
+ ()
+)
+
+
+DEF_MACRO(CHECK_VTCM_PAGE, (FLAG, BASE, LENGTH, OFFSET, ALIGNMENT),
+ "FLAG=((BASE+OFFSET) < (BASE+LENGTH))",
+ "FLAG=((BASE+OFFSET) < (BASE+LENGTH))",
+ {
+ int slot = insn->slot;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ pa = pa & ~(ALIGNMENT-1);
+ FLAG = (pa < (thread->mem_access[slot].paddr+LENGTH));
+ },
+ ()
+)
+DEF_MACRO(COUNT_OUT_OF_BOUNDS, (FLAG, SIZE),
+ " ",
+ "",
+ {
+ if (!FLAG)
+ {
+ THREAD2STRUCT->vtcm_log.oob_access += SIZE;
+ warn("Scatter/Gather out of bounds of region");
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fLOG_SCATTER_OP, (SIZE),
+ " ",
+ " ",
+ {
+        // Log the size and indicate that the extension ext.c file needs to increment right before memory write
+ THREAD2STRUCT->vtcm_log.op = 1;
+ THREAD2STRUCT->vtcm_log.op_size = SIZE;
+ },
+ ()
+)
+
+
+
+DEF_MACRO(fVLOG_VTCM_WORD_INCREMENT, (EA,OFFSET,INC,IDX,ALIGNMENT,LEN),
+ "if (RtV <= EA <= RtV + LEN) *EA += INC.uw[IDX] ",
+ "if (RtV <= EA <= RtV + LEN) *EA += INC.uw[IDX] ",
+ {
+ int slot = insn->slot;
+ int log_bank = 0;
+ int log_byte =0;
+ paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ for(int i0 = 0; i0 < 4; i0++)
+ {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
+ log_bank |= (log_byte<<i0);
+ LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[4*IDX+i0],4*IDX+i0);
+ }
+ { LOG_VTCM_BANK(pa, log_bank, IDX); }
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD_INCREMENT, (EA,OFFSET,INC,IDX,ALIGNMENT,LEN),
+ "if (RtV <= EA <= RtV + LEN) *EA += INC.uh[IDX] ",
+ "if (RtV <= EA <= RtV + LEN) *EA += INC.uh[IDX] ",
+ {
+ int slot = insn->slot;
+ int log_bank = 0;
+ int log_byte = 0;
+ paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ for(int i0 = 0; i0 < 2; i0++) {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
+ log_bank |= (log_byte<<i0);
+ LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[2*IDX+i0],2*IDX+i0);
+ }
+ { LOG_VTCM_BANK(pa, log_bank,IDX); }
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD_INCREMENT_DV, (EA,OFFSET,INC,IDX,IDX2,IDX_H,ALIGNMENT,LEN),
+ "if (RtV <= EA <= RtV + LEN) *EA += IN.uw[IDX2].uh[IDX_H] ",
+ "if (RtV <= EA <= RtV + LEN) *EA += IN.uw[IDX2].uh[IDX_H] ",
+ {
+ int slot = insn->slot;
+ int log_bank = 0;
+ int log_byte = 0;
+ paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ for(int i0 = 0; i0 < 2; i0++) {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
+ log_bank |= (log_byte<<i0);
+ LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[2*IDX+i0],2*IDX+i0);
+ }
+ { LOG_VTCM_BANK(pa, log_bank,(2*IDX2+IDX_H));}
+ },
+ ()
+)
+
+
+
+DEF_MACRO(GATHER_FUNCTION, (EA,OFFSET,IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL),
+"",
+"",
+{
+ int slot = insn->slot;
+ int i0;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ int log_bank = 0;
+ int log_byte = 0;
+ for(i0 = 0; i0 < ELEMENT_SIZE; i0++)
+ {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL;
+ log_bank |= (log_byte<<i0);
+        size1u_t B = sim_mem_read1(thread->system_ptr, thread->threadId, thread->mem_access[slot].paddr+OFFSET+i0);
+ THREAD2STRUCT->tmp_VRegs[0].ub[ELEMENT_SIZE*IDX+i0] = B;
+ LOG_VTCM_BYTE(pa+i0,log_byte,B,ELEMENT_SIZE*IDX+i0);
+ }
+ LOG_VTCM_BANK(pa, log_bank,BANK_IDX);
+},
+()
+)
+
+
+
+DEF_MACRO(fVLOG_VTCM_GATHER_WORD, (EA,OFFSET,IDX, LEN),
+ "if (RtV <= EA <= RtV + LEN) TEMP.uw[IDX] = *EA ",
+ "if (RtV <= EA <= RtV + LEN) TEMP.uw[IDX] = *EA ",
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, 1);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD, (EA,OFFSET,IDX, LEN),
+ " if (RtV <= EA <= RtV + LEN) TEMP.uh[IDX] = *EA ",
+ " if (RtV <= EA <= RtV + LEN) TEMP.uh[IDX] = *EA ",
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, 1);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD_DV, (EA,OFFSET,IDX,IDX2,IDX_H, LEN),
+ "if (RtV <= EA <= RtV + LEN) TEMP.uw[IDX2].uh[IDX_H] = *EA ",
+ "if (RtV <= EA <= RtV + LEN) TEMP.uw[IDX2].uh[IDX_H] = *EA ",
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_WORDQ, (EA,OFFSET,IDX, Q, LEN),
+ " if ( (RtV <= EA <= RtV + LEN) & Q) TEMP.uw[IDX] = *EA ",
+ " if ( (RtV <= EA <= RtV + LEN) & Q) TEMP.uw[IDX] = *EA ",
+ {
+        GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0));
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ, (EA,OFFSET,IDX, Q, LEN),
+ " if ( (RtV <= EA <= RtV + LEN) & Q) TEMP.uh[IDX] = *EA ",
+ " if ( (RtV <= EA <= RtV + LEN) & Q) TEMP.uh[IDX] = *EA ",
+ {
+        GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0));
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ_DV, (EA,OFFSET,IDX,IDX2,IDX_H, Q, LEN),
+ " if ( (RtV <= EA <= RtV + LEN) & Q) TEMP.uw[IDX2].uh[IDX_H] = *EA ",
+ " if ( (RtV <= EA <= RtV + LEN) & Q) TEMP.uw[IDX2].uh[IDX_H] = *EA ",
+ {
+        GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0));
+ },
+ ()
+)
+
+
+DEF_MACRO(DEBUG_LOG_ADDR, (OFFSET),
+ " ",
+ " ",
+ {
+
+ if (thread->processor_ptr->arch_proc_options->mmvec_network_addr_log2)
+ {
+
+ int slot = insn->slot;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ }
+ },
+ ()
+)
+
+
+
+
+
+
+
+DEF_MACRO(SCATTER_OP_WRITE_TO_MEM, (TYPE),
+ " Read, accumulate, and write to VTCM",
+ " ",
+ {
+ for (int i = 0; i < mmvecx->vtcm_log.size; i+=sizeof(TYPE))
+ {
+ if ( mmvecx->vtcm_log.mask.ub[i] != 0) {
+ TYPE dst = 0;
+ TYPE inc = 0;
+ for(int j = 0; j < sizeof(TYPE); j++) {
+                    dst |= (sim_mem_read1(thread->system_ptr, thread->threadId, mmvecx->vtcm_log.pa[i+j]) << (8*j));
+ inc |= mmvecx->vtcm_log.data.ub[j+i] << (8*j);
+
+ mmvecx->vtcm_log.mask.ub[j+i] = 0;
+ mmvecx->vtcm_log.data.ub[j+i] = 0;
+ mmvecx->vtcm_log.offsets.ub[j+i] = 0;
+ }
+ dst += inc;
+ for(int j = 0; j < sizeof(TYPE); j++) {
+                    sim_mem_write1(thread->system_ptr,thread->threadId, mmvecx->vtcm_log.pa[i+j], (dst >> (8*j))& 0xFF );
+ }
+ }
+
+ }
+ },
+ ()
+)
+
+DEF_MACRO(SCATTER_FUNCTION, (EA,OFFSET,IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL, IN),
+"",
+"",
+{
+ int slot = insn->slot;
+ int i0;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ int log_bank = 0;
+ int log_byte = 0;
+ for(i0 = 0; i0 < ELEMENT_SIZE; i0++) {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL;
+ log_bank |= (log_byte<<i0);
+        LOG_VTCM_BYTE(pa+i0,log_byte,IN.ub[ELEMENT_SIZE*IDX+i0],ELEMENT_SIZE*IDX+i0);
+ }
+ LOG_VTCM_BANK(pa, log_bank,BANK_IDX);
+
+},
+()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD, (EA,OFFSET,IN,IDX, LEN),
+ "if (RtV <= EA <= RtV + LEN) *EA = IN.uh[IDX] ",
+ "if (RtV <= EA <= RtV + LEN) *EA = IN.uh[IDX] ",
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, IDX, 1, IN);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_WORD, (EA,OFFSET,IN,IDX,LEN),
+ "if (RtV <= EA <= RtV + LEN) *EA = IN.uw[IDX] ",
+ "if (RtV <= EA <= RtV + LEN) *EA = IN.uw[IDX] ",
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 4, IDX, 1, IN);
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORDQ, (EA,OFFSET,IN,IDX,Q,LEN),
+ " if ( (RtV <= EA <= RtV + LEN) & Q) *EA = IN.uh[IDX] ",
+ " if ( (RtV <= EA <= RtV + LEN) & Q) *EA = IN.uh[IDX] ",
+ {
+        SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0), IN);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_WORDQ, (EA,OFFSET,IN,IDX,Q,LEN),
+ " if ( (RtV <= EA <= RtV + LEN) & Q) *EA = IN.uw[IDX] ",
+ " if ( (RtV <= EA <= RtV + LEN) & Q) *EA = IN.uw[IDX] ",
+ {
+        SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0), IN);
+ },
+ ()
+)
+
+
+
+
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD_DV, (EA,OFFSET,IN,IDX,IDX2,IDX_H, LEN),
+ "if (RtV <= EA <= RtV + LEN) *EA = IN.hw[IDX2].uh[IDX_H] ",
+ "if (RtV <= EA <= RtV + LEN) *EA = IN.hw[IDX2].uh[IDX_H] ",
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1, IN);
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORDQ_DV, (EA,OFFSET,IN,IDX,Q,IDX2,IDX_H, LEN),
+ " if ( (RtV <= EA <= RtV + LEN) & Q) *EA = IN.hw[IDX2].uh[IDX_H] ",
+ " if ( (RtV <= EA <= RtV + LEN) & Q) *EA = IN.hw[IDX2].uh[IDX_H] ",
+ {
+        SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0), IN);
+ },
+ ()
+)
+
+
+
+
+
+
+DEF_MACRO(fSTORERELEASE, (EA,TYPE),
+    "char* addr = EA&~(ALIGNMENT-1); Zero Byte Store Release (Non-blocking Sync)",
+ "Zero Byte Store Release (Sync)",
+ {
+ fV_AL_CHECK(EA,fVECSIZE()-1);
+
+        mem_store_release(thread, insn, fVECSIZE(), EA&~(fVECSIZE()-1), EA, TYPE, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST)
+)
+
+DEF_MACRO(fVFETCH_AL, (EA),
+ "Prefetch vector into L2 cache at EA",
+ "Prefetch vector into L2 cache at EA",
+ {
+ fV_AL_CHECK(EA,fVECSIZE()-1);
+ mem_fetch_vector(thread, insn, EA&~(fVECSIZE()-1), insn->slot, fVECSIZE());
+ },
+ (A_LOAD,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST,A_RESTRICT_NOSLOT1_STORE)
+)
+
+
+DEF_MACRO(fLOADMMV_AL, (EA, ALIGNMENT, LEN, DST),
+ "char* addr = EA&~(ALIGNMENT-1); for (i=0; i<LEN; ++i) DST[i] = addr[i]",
+ "Load LEN bytes from memory at EA (forced alignment) to DST.",
+ {
+ fV_AL_CHECK(EA,ALIGNMENT-1);
+ thread->last_pkt->double_access_vec = 0;
+        mem_load_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &DST.ub[0], LEN, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_LOAD,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST,A_RESTRICT_NOSLOT1_STORE)
+)
+
+DEF_MACRO(fLOADMMV, (EA, DST),
+ "DST = *(EA&~(ALIGNMENT-1))",
+ "Load vector from memory at EA (forced alignment) to DST.",
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST),
+ ()
+)
+
+DEF_MACRO(fLOADMMVQ, (EA,DST,QVAL),
+ "DST = vmux(QVAL,*(EA&~(ALIGNMENT-1)),0)",
+ "Load vector from memory at EA (forced alignment) to DST.",
+ do {
+ int __i;
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ fVFOREACH(8,__i) if (!fGETQBIT(QVAL,__i)) DST.b[__i] = 0;
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fLOADMMVNQ, (EA,DST,QVAL),
+ "DST = vmux(QVAL,0,*(EA&~(ALIGNMENT-1)))",
+ "Load vector from memory at EA (forced alignment) to DST.",
+ do {
+ int __i;
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ fVFOREACH(8,__i) if (fGETQBIT(QVAL,__i)) DST.b[__i] = 0;
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fLOADMMVU_AL, (EA, ALIGNMENT, LEN, DST),
+ "char* addr = EA; for (i=0; i<LEN; ++i) DST[i] = addr[i]",
+ "Load LEN bytes from memory at EA (unaligned) to DST.",
+ {
+ size4u_t size2 = (EA)&(ALIGNMENT-1);
+ size4u_t size1 = LEN-size2;
+ thread->last_pkt->double_access_vec = 1;
+        mem_load_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &DST.ub[size1], size2, fUSE_LOOKUP_ADDRESS());
+        mem_load_vector_oddva(thread, insn, EA, EA,/* slot */ 0, size1, &DST.ub[0], size1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_LOAD,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST,A_RESTRICT_NOSLOT1_STORE)
+)
+
+DEF_MACRO(fLOADMMVU, (EA, DST),
+ "DST = *EA",
+ "Load vector from memory at EA (unaligned) to DST.",
+ {
+ /* if address happens to be aligned, only do aligned load */
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->pkt_has_vmemu_access = 0;
+ thread->last_pkt->double_access = 0;
+
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ } else {
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ thread->last_pkt->double_access = 1;
+
+ fLOADMMVU_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fSTOREMMV_AL, (EA, ALIGNMENT, LEN, SRC),
+ "char* addr = EA&~(ALIGNMENT-1); for (i=0; i<LEN; ++i) addr[i] = SRC[i]",
+ "Store LEN bytes from SRC into memory at EA (forced alignment).",
+ {
+ fV_AL_CHECK(EA,ALIGNMENT-1);
+        mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST)
+)
+
+DEF_MACRO(fSTOREMMV, (EA, SRC),
+ "*(EA&~(ALIGNMENT-1)) = SRC",
+    "Store vector SRC to memory at EA (forced alignment).",
+ fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC),
+ ()
+)
+
+DEF_MACRO(fSTOREMMVQ_AL, (EA, ALIGNMENT, LEN, SRC, MASK),
+    "char* addr = EA&~(ALIGNMENT-1); for (i=0; i<LEN; ++i) if (MASK[i]) addr[i] = SRC[i]",
+ "Store LEN bytes from SRC into memory at EA (forced alignment).",
+ do {
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+        mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ } while (0),
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST)
+)
+
+DEF_MACRO(fSTOREMMVQ, (EA, SRC, MASK),
+ "*(EA&~(ALIGNMENT-1)) = SRC",
+ "Masked store vector SRC to memory at EA (forced alignment).",
+ fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK),
+ ()
+)
+
+DEF_MACRO(fSTOREMMVNQ_AL, (EA, ALIGNMENT, LEN, SRC, MASK),
+    "char* addr = EA&~(ALIGNMENT-1); for (i=0; i<LEN; ++i) if (!MASK[i]) addr[i] = SRC[i]",
+ "Store LEN bytes from SRC into memory at EA (forced alignment).",
+ {
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+ fV_AL_CHECK(EA,ALIGNMENT-1);
+        mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST)
+)
+
+DEF_MACRO(fSTOREMMVNQ, (EA, SRC, MASK),
+ "*(EA&~(ALIGNMENT-1)) = SRC",
+ "Masked negated store vector SRC to memory at EA (forced alignment).",
+ fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK),
+ ()
+)
+
+DEF_MACRO(fSTOREMMVU_AL, (EA, ALIGNMENT, LEN, SRC),
+ "char* addr = EA; for (i=0; i<LEN; ++i) addr[i] = SRC[i]",
+ "Store LEN bytes from SRC into memory at EA (unaligned).",
+ {
+ size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
+ size4u_t size2;
+ if (size1>LEN) size1 = LEN;
+ size2 = LEN-size1;
+        mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], 0, 0, fUSE_LOOKUP_ADDRESS());
+        mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST)
+)
+
+DEF_MACRO(fSTOREMMVU, (EA, SRC),
+ "*EA = SRC",
+ "Store vector SRC to memory at EA (unaligned).",
+ {
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->double_access = 0;
+ fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC);
+ } else {
+ thread->last_pkt->double_access = 1;
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ fSTOREMMVU_AL(EA,fVECSIZE(),fVECSIZE(),SRC);
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fSTOREMMVQU_AL, (EA, ALIGNMENT, LEN, SRC, MASK),
+ "char* addr = EA; for (i=0; i<LEN; ++i) if (MASK[i]) addr[i] = SRC[i]",
+ "Store LEN bytes from SRC into memory at EA (unaligned).",
+ {
+ size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
+ size4u_t size2;
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+ if (size1>LEN) size1 = LEN;
+ size2 = LEN-size1;
+        mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(),/* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 0, fUSE_LOOKUP_ADDRESS());
+        mem_store_vector_oddva(thread, insn, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST)
+)
+
+DEF_MACRO(fSTOREMMVQU, (EA, SRC, MASK),
+ "*EA = SRC",
+ "Store vector SRC to memory at EA (unaligned).",
+ {
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->double_access = 0;
+ fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ } else {
+ thread->last_pkt->double_access = 1;
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ fSTOREMMVQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fSTOREMMVNQU_AL, (EA, ALIGNMENT, LEN, SRC, MASK),
+ "char* addr = EA; for (i=0; i<LEN; ++i) if (!MASK[i]) addr[i] = SRC[i]",
+ "Store LEN bytes from SRC into memory at EA (unaligned).",
+ {
+ size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
+ size4u_t size2;
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+ if (size1>LEN) size1 = LEN;
+ size2 = LEN-size1;
+        mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 1, fUSE_LOOKUP_ADDRESS());
+        mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SINGLE_MEM_FIRST)
+)
+
+DEF_MACRO(fSTOREMMVNQU, (EA, SRC, MASK),
+ "*EA = SRC",
+ "Store vector SRC to memory at EA (unaligned).",
+ {
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->double_access = 0;
+ fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ } else {
+ thread->last_pkt->double_access = 1;
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ fSTOREMMVNQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ }
+ },
+ ()
+)
+
+
+
+
+DEF_MACRO(fVFOREACH,(WIDTH, VAR),
+ "for (VAR = 0; VAR < VELEM(WIDTH); VAR++)",
+ "For VAR in each WIDTH-bit vector index",
+ for (VAR = 0; VAR < fVELEM(WIDTH); VAR++),
+ /* NOTHING */
+)
+
+DEF_MACRO(fVARRAY_ELEMENT_ACCESS, (ARRAY, TYPE, INDEX),
+ "ARRAY.TYPE[INDEX]",
+ "Access element of type TYPE at position INDEX of flattened ARRAY",
+    ARRAY.v[(INDEX) / (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))],
+ ()
+)
+
+DEF_MACRO(fVNEWCANCEL,(REGNUM),
+ "Ignore current value for register REGNUM",
+ "Ignore current value for register REGNUM",
+ do { THREAD2STRUCT->VRegs_select &= ~(1<<(REGNUM)); } while (0),
+ ()
+)
+
+DEF_MACRO(fTMPVDATA,(),
+ "Data from .tmp load",
+ "Data from .tmp load and clear tmp status",
+ mmvec_vtmp_data(thread),
+ (A_CVI,A_CVI_REQUIRES_TMPLOAD)
+)
+
+DEF_MACRO(fVSATDW, (U,V),
+ "usat_32(U:V)",
+    "Use 32-bits of U as MSW and 32-bits of V as LSW and saturate the resultant 64-bits to 32 bits",
+ fVSATW( ( ( ((long long)U)<<32 ) | fZXTN(32,64,V) ) ),
+ /* attribs */
+)
+
+DEF_MACRO(fVASL_SATHI, (U,V),
+ "uasl_sathi(U:V)",
+    "Use 32-bits of U as MSW and 32-bits of V as LSW, left shift by 1 and saturate the result and take high word",
+ fVSATW(((U)<<1) | ((V)>>31)),
+ /* attribs */
+)
+
+DEF_MACRO(fVUADDSAT,(WIDTH,U,V),
+ "usat_##WIDTH(U+V)",
+ "Add WIDTH-bit values U and V with saturation",
+ fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVSADDSAT,(WIDTH,U,V),
+ "sat_##WIDTH(U+V)",
+ "Add WIDTH-bit values U and V with saturation",
+ fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVUSUBSAT,(WIDTH,U,V),
+ "usat_##WIDTH(U-V)",
+ "sub WIDTH-bit values U and V with saturation",
+ fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVSSUBSAT,(WIDTH,U,V),
+ "sat_##WIDTH(U-V)",
+ "sub WIDTH-bit values U and V with saturation",
+ fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGU,(WIDTH,U,V),
+ "(U+V)/2",
+ "average WIDTH-bit values U and V with saturation",
+ ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGURND,(WIDTH,U,V),
+ "(U+V+1)/2",
+ "average WIDTH-bit values U and V with saturation",
+ ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)+1)>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGU,(WIDTH,U,V),
+ "(U-V)/2",
+ "average WIDTH-bit values U and V with saturation",
+ ((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGURNDSAT,(WIDTH,U,V),
+ "(U-V+1)/2",
+ "average WIDTH-bit values U and V with saturation",
+    fVSATUN(WIDTH,((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)+1)>>1)),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGS,(WIDTH,U,V),
+ "(U+V)/2",
+ "average WIDTH-bit values U and V with saturation",
+ ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGSRND,(WIDTH,U,V),
+ "(U+V+1)/2",
+ "average WIDTH-bit values U and V with saturation",
+ ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)+1)>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGS,(WIDTH,U,V),
+ "(U-V)/2",
+ "average WIDTH-bit values U and V with saturation",
+ ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGSRND,(WIDTH,U,V),
+ "(U-V+1)/2",
+ "average WIDTH-bit values U and negative V followed by rounding",
+ ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGSRNDSAT,(WIDTH,U,V),
+ "(U-V+1)/2",
+ "average WIDTH-bit values U and V with saturation",
+    fVSATN(WIDTH,((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1)),
+ /* attribs */
+)
+
+
+DEF_MACRO(fVNOROUND,(VAL,SHAMT),
+ "VAL",
+ "VAL",
+ VAL,
+ /* NOTHING */
+)
+DEF_MACRO(fVNOSAT,(VAL),
+ "VAL",
+ "VAL",
+ VAL,
+ /* NOTHING */
+)
+
+DEF_MACRO(fVROUND,(VAL,SHAMT),
+ "VAL + (1<<(SHAMT-1))",
+ "VAL + RNDBIT",
+ ((VAL) + (((SHAMT)>0)?(1LL<<((SHAMT)-1)):0)),
+ /* NOTHING */
+)
+
+DEF_MACRO(fCARRY_FROM_ADD32,(A,B,C),
+ "carry_from(A,B,C)",
+ "carry_from(A,B,C)",
+ (((fZXTN(32,64,A)+fZXTN(32,64,B)+C) >> 32) & 1),
+ /* NOTHING */
+)
+
+DEF_MACRO(fUARCH_NOTE_PUMP_4X,(),
+ "",
+ "",
+ ,
+ (A_CVI_PUMP_4X)
+)
+
+DEF_MACRO(fUARCH_NOTE_PUMP_2X,(),
+ "",
+ "",
+ ,
+ (A_CVI_PUMP_2X)
+)
+
+DEF_MACRO(fVDOCHKPAGECROSS,(BASE,SUM),
+ "",
+ "",
+ if (UNLIKELY(thread->timing_on)) {
+ thread->mem_access[slot].check_page_crosses = 1;
+ thread->mem_access[slot].page_cross_base = BASE;
+ thread->mem_access[slot].page_cross_sum = SUM;
+ },
+ (A_EA_PAGECROSS)
+)
+
+
+
--
2.7.4