> diff --git a/target-mips/cpu.h b/target-mips/cpu.h
> index 2419aa9..59bcc0f 100644
> --- a/target-mips/cpu.h
> +++ b/target-mips/cpu.h
> @@ -140,6 +140,20 @@ typedef struct mips_def_t mips_def_t;
> #define MIPS_FPU_MAX 1
> #define MIPS_DSP_ACC 4
>
> +typedef struct cavium_mul cavium_mul;
> +struct cavium_mul {
> + target_ulong MPL0;
> + target_ulong MPL1;
> + target_ulong MPL2;
> + target_ulong P0;
> + target_ulong P1;
> + target_ulong P2;
> +};
> +typedef struct cvmctl_register cvmctl_register;
> +struct cvmctl_register {
> + target_ulong cvmctl;
> +};
> +
> typedef struct TCState TCState;
> struct TCState {
> target_ulong gpr[32];
> @@ -178,6 +192,8 @@ struct CPUMIPSState {
> TCState active_tc;
> CPUMIPSFPUContext active_fpu;
>
> + cavium_mul Reg;
> + cvmctl_register CvmCtlRegister;
> uint32_t current_tc;
> uint32_t current_fpu;
>
> diff --git a/target-mips/translate.c b/target-mips/translate.c
> index cce77be..9c3d772 100644
> --- a/target-mips/translate.c
> +++ b/target-mips/translate.c
> @@ -36,6 +36,15 @@
> #define GEN_HELPER 1
> #include "helper.h"
>
> +int TARGET_OCTEON;
> +#if defined(TARGET_MIPS64)
> +/*Macros for setting values of cvmctl registers*/
> +#define FUSE_START_BIT(cvmctl)(cvmctl | 0x80000000)
> +#define KASUMI(cvmctl)(cvmctl | 0x20000000)
> +#define IPPCI(cvmctl)(cvmctl | 0x380)
> +#define IPTI(cvmctl)(cvmctl | 0x70)
> +#endif
> +
> //#define MIPS_DEBUG_DISAS
> //#define MIPS_DEBUG_SIGN_EXTENSIONS
>
> @@ -70,6 +79,11 @@ enum {
> OPC_JAL = (0x03 << 26),
> OPC_JALS = OPC_JAL | 0x5,
> OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */
> + /* Cavium Specific */
> + OPC_BBIT1 = (0x3a << 26), /*jump on bit set, cavium specific*/
> + OPC_BBIT132 = (0x3e << 26), /*jump on bit set(one of the upper 32
> bits)*/
> + OPC_BBIT0 = (0x32 << 26), /*jump on bit clear, cavium specific*/
> + OPC_BBIT032 = (0x36 << 26), /*jump on bit clear(one of the upper 32
> bits)*/
> OPC_BEQL = (0x14 << 26),
> OPC_BNE = (0x05 << 26),
> OPC_BNEL = (0x15 << 26),
> @@ -265,6 +279,31 @@ enum {
> OPC_MADD = 0x00 | OPC_SPECIAL2,
> OPC_MADDU = 0x01 | OPC_SPECIAL2,
> OPC_MUL = 0x02 | OPC_SPECIAL2,
> + /* Cavium Specific Instructions */
> + OPC_BADDU = 0x28 | OPC_SPECIAL2,
> + OPC_DMUL = 0x03 | OPC_SPECIAL2,
> + OPC_EXTS = 0x3a | OPC_SPECIAL2,
> + OPC_EXTS32 = 0x3b | OPC_SPECIAL2,
> + OPC_CINS = 0x32 | OPC_SPECIAL2,
> + OPC_CINS32 = 0x33 | OPC_SPECIAL2,
> + OPC_SEQI = 0x2e | OPC_SPECIAL2,
> + OPC_SNEI = 0x2f | OPC_SPECIAL2,
> + OPC_MTM0 = 0x08 | OPC_SPECIAL2,
> + OPC_MTM1 = 0x0c | OPC_SPECIAL2,
> + OPC_MTM2 = 0x0d | OPC_SPECIAL2,
> + OPC_MTP0 = 0x09 | OPC_SPECIAL2,
> + OPC_MTP1 = 0x0a | OPC_SPECIAL2,
> + OPC_MTP2 = 0x0b | OPC_SPECIAL2,
> + OPC_V3MULU = 0x11 | OPC_SPECIAL2,
> + OPC_VMM0 = 0x10 | OPC_SPECIAL2,
> + OPC_VMULU = 0x0f | OPC_SPECIAL2,
> + OPC_POP = 0X2C | OPC_SPECIAL2,
> + OPC_DPOP = 0X2D | OPC_SPECIAL2,
> + OPC_SEQ = 0x2a | OPC_SPECIAL2,
> + OPC_SNE = 0x2b | OPC_SPECIAL2,
> + OPC_SAA = 0x18 | OPC_SPECIAL2,
> + OPC_SAAD = 0x19 | OPC_SPECIAL2,
> +/**************************************/
> OPC_MSUB = 0x04 | OPC_SPECIAL2,
> OPC_MSUBU = 0x05 | OPC_SPECIAL2,
> /* Loongson 2F */
> @@ -483,7 +522,7 @@ enum {
> static TCGv_ptr cpu_env;
> static TCGv cpu_gpr[32], cpu_PC;
> static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC],
> cpu_ACX[MIPS_DSP_ACC];
> -static TCGv cpu_dspctrl, btarget, bcond;
> +static TCGv cpu_dspctrl, btarget, bcond, mpl0, mpl1, mpl2, p0, p1, p2;
> static TCGv_i32 hflags;
> static TCGv_i32 fpu_fcr0, fpu_fcr31;
>
> @@ -779,7 +818,9 @@ static inline void gen_op_addr_add (DisasContext *ctx,
> TCGv ret, TCGv arg0, TCGv
> See the MIPS64 PRA manual, section 4.10. */
> if (((ctx->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
> !(ctx->hflags & MIPS_HFLAG_UX)) {
> - tcg_gen_ext32s_i64(ret, ret);
> + /* This function sign-extends a 32-bit value to 64 bits; it was
> + causing an error when the ld instruction came. That is why it
> + is commented out. */
> + /* tcg_gen_ext32s_i64(ret, ret);*/
> }
> #endif
> }
> @@ -1419,7 +1460,33 @@ static void gen_arith_imm (CPUState *env,
> DisasContext *ctx, uint32_t opc,
> (void)opn; /* avoid a compiler warning */
> MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt],
> regnames[rs], uimm);
> }
> -
> +#if defined(TARGET_MIPS64)
> +/*set on equal immediate / set on not equal immediate*/
> +static void gen_set_imm(CPUState *env, uint32_t opc, int rt, int rs,
> int16_t imm)
> +{
> + target_ulong uimm;
> + TCGv t0, t1;
> + const char *opn = "imm set";
> + uimm = (uint16_t)imm;
> + t0 = tcg_temp_new();
> + t1 = tcg_temp_new();
> + switch (opc) {
> + case OPC_SEQI:
> + tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
> + gen_load_gpr(t0, rt);
> + tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, 1);
> + opn = "seqi";
> + break;
> + case OPC_SNEI:
> + tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
> + gen_load_gpr(t0, rt);
> + gen_load_gpr(t1, 0);
> + tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rt], t1, t0);
> + opn = "snei";
> + break;
> + }
> +}
> +#endif
> /* Logic with immediate operand */
> static void gen_logic_imm (CPUState *env, uint32_t opc, int rt, int rs,
> int16_t imm)
> {
> @@ -1583,13 +1650,196 @@ static void gen_shift_imm(CPUState *env,
> DisasContext *ctx, uint32_t opc,
> MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt],
> regnames[rs], uimm);
> tcg_temp_free(t0);
> }
> +#if defined(TARGET_MIPS64)
> +/* Addition and carry detection*/
> +static void gen_addc (TCGv arg1, TCGv arg2, TCGv res, TCGv c)
> +{
> + tcg_gen_add_tl(res, arg1, arg2);
> + tcg_gen_setcond_tl(TCG_COND_LTU, c, res, arg1);
> +}
> +static void gen_LMI (CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
> + int rs, int rt, int rd)
> +{
> + const char *opn = "LMI";
> + TCGv t0, t1;
> + int nomul = env->CvmCtlRegister.cvmctl & 0x8000000;
> + if (!nomul) {
> + switch (opc) {
> + case OPC_MTM0:
> + tcg_gen_mov_tl(mpl0, cpu_gpr[rs]);
> + tcg_gen_movi_tl(p0, 0);
> + tcg_gen_movi_tl(p1, 0);
> + tcg_gen_movi_tl(p2, 0);
> + opn = "mtm0";
> + break;
> + case OPC_MTM1:
> + tcg_gen_mov_tl(mpl1, cpu_gpr[rs]);
> + tcg_gen_movi_tl(p0, 0);
> + tcg_gen_movi_tl(p1, 0);
> + tcg_gen_movi_tl(p2, 0);
> + opn = "mtm1";
> + break;
> + case OPC_MTM2:
> + tcg_gen_mov_tl(mpl2, cpu_gpr[rs]);
> + tcg_gen_movi_tl(p0, 0);
> + tcg_gen_movi_tl(p1, 0);
> + tcg_gen_movi_tl(p2, 0);
> + opn = "mtm2";
> + break;
> + case OPC_MTP0:
> + tcg_gen_mov_tl(p0, cpu_gpr[rs]);
> + opn = "mtp0";
> + break;
> + case OPC_MTP1:
> + tcg_gen_mov_tl(p1, cpu_gpr[rs]);
> + opn = "mtp1";
> + break;
> + case OPC_MTP2:
> + tcg_gen_mov_tl(p2, cpu_gpr[rs]);
> + opn = "mtp2";
> + break;
> + case OPC_VMM0:
> + t0 = tcg_temp_new();
> + t1 = tcg_temp_new();
> + gen_load_gpr(t1, rs);
> + gen_helper_dmultu(t1, mpl0);
> + gen_load_gpr(t0, rt);
> + tcg_gen_add_tl(t0, t0, cpu_LO[0]);
> + tcg_gen_add_tl(t0, t0, p0);
> + gen_store_gpr(t0, rd);
> + tcg_gen_mov_tl(mpl0, cpu_gpr[rd]);
> + tcg_gen_movi_tl(p0, 0);
> + tcg_gen_movi_tl(p1, 0);
> + tcg_gen_movi_tl(p2, 0);
> + tcg_temp_free(t0);
> + tcg_temp_free(t1);
> + opn = "vmm0";
> + break;
> + case OPC_VMULU:
> + {
> + TCGv t2, c;
> + t0 = tcg_temp_new();
> + t1 = tcg_temp_new();
> + t2 = tcg_temp_new();
> + c = tcg_temp_new();
> + gen_load_gpr(t1, rs);
> + gen_load_gpr(t2, rt);
> + gen_helper_dmultu(t1, mpl0);
> + tcg_gen_mov_tl(t0, cpu_LO[0]);
> + /*if carry comes due to addition of rt and LO register,
> + * this carry should be added to HI register.
> + */
> + gen_addc(t0, t2, t1, c);
>
> + tcg_gen_add_tl(cpu_HI[0], cpu_HI[0], c);
> + /* t0 = t1 + p0 where t1 = LO+rt*/
> + gen_addc(t1, p0, t0, c);
> + tcg_gen_add_tl(cpu_HI[0], cpu_HI[0], c);
> +
> + tcg_gen_mov_tl(cpu_gpr[rd], t0);
> + tcg_gen_mov_tl(p0, cpu_HI[0]);
> + tcg_temp_free(t0);
> + tcg_temp_free(t1);
> + tcg_temp_free(t2);
> + opn = "vmulu";
> + break;
> + }
> + case OPC_V3MULU:
> + {
> + TCGv temp[4];
> + TCGv c;
> + TCGv trs, trt, tc1, tc2, tc3;
> + temp[0] = tcg_temp_new();
> + temp[1] = tcg_temp_new();
> + temp[2] = tcg_temp_new();
> + temp[3] = tcg_temp_new();
> + trs = tcg_temp_new();
> + trt = tcg_temp_new();
> + tc1 = tcg_temp_new();
> + tc2 = tcg_temp_new();
> + tc3 = tcg_temp_new();
> + c = tcg_temp_new();
> + gen_load_gpr(trs, rs);
> + gen_load_gpr(trt, rt);
> + /* rs × (MPL2 || MPL1 || MPL0) (192X64 bit multiplication)
> */
> + gen_helper_dmultu(trs, mpl0);
> + tcg_gen_mov_tl(temp[0], cpu_LO[0]);
> + tcg_gen_mov_tl(temp[1], cpu_HI[0]);
> +
> + gen_helper_dmultu(trs, mpl1);
> + tcg_gen_mov_tl(temp[2], cpu_HI[0]);
> + gen_addc(cpu_LO[0], temp[1], tc1, tc2);
> + gen_addc(temp[2], tc2, tc3, c);
> + tcg_gen_mov_tl(temp[1], tc1);
> + tcg_gen_mov_tl(temp[2], tc3);
> + tcg_gen_mov_tl(temp[3], c);
> +
> + gen_helper_dmultu(trs, mpl2);
> + tcg_gen_add_tl(temp[3], temp[3], cpu_HI[0]);
> + gen_addc(cpu_LO[0], temp[2], tc1, tc2);
> + tcg_gen_mov_tl(temp[2], tc1);
> + tcg_gen_add_tl(temp[3], temp[3], tc2);
> + /* Addition of rt in 256 bit result
> + (t3 t2 t1 t0 contain result) */
> + gen_addc(temp[0], trt, tc1, c);
> + tcg_gen_mov_tl(temp[0], tc1);
> + gen_addc(temp[1], c, tc1, tc2);
> + tcg_gen_mov_tl(temp[1], tc1);
> + gen_addc(temp[2], tc2, tc1, c);
> + tcg_gen_mov_tl(temp[2], tc1);
> + tcg_gen_add_tl(temp[3], temp[3], c);
> + /* Addition of p2 p1 p0 in 256 bit result */
> + gen_addc(temp[0], p0, tc1, c);
> + tcg_gen_mov_tl(temp[0], tc1);
> + gen_addc(temp[1], c, tc1, tc2);
> + tcg_gen_mov_tl(temp[1], tc1);
> + gen_addc(temp[2], tc2, tc1, c);
> + tcg_gen_mov_tl(temp[2], tc1);
> + tcg_gen_add_tl(temp[3], temp[3], c);
> +
> + gen_addc(temp[1], p1, tc1, c);
> + tcg_gen_mov_tl(temp[1], tc1);
> + gen_addc(temp[2], c, tc1, tc2);
> + tcg_gen_mov_tl(temp[2], tc1);
> + tcg_gen_add_tl(temp[3], temp[3], tc2);
> +
> + gen_addc(temp[2], p2, tc1, c);
> + tcg_gen_mov_tl(temp[2], tc1);
> + tcg_gen_add_tl(temp[3], temp[3], c);
> + /* final step */
> + tcg_gen_mov_tl(cpu_gpr[rd], temp[0]);
> + tcg_gen_mov_tl(p0, temp[1]);
> + tcg_gen_mov_tl(p1, temp[2]);
> + tcg_gen_mov_tl(p2, temp[3]);
> + tcg_temp_free(temp[0]);
> + tcg_temp_free(temp[1]);
> + tcg_temp_free(temp[2]);
> + tcg_temp_free(temp[3]);
> + tcg_temp_free(trs);
> + tcg_temp_free(trt);
> + tcg_temp_free(tc1);
> + tcg_temp_free(tc2);
> + tcg_temp_free(tc3);
> + tcg_temp_free(c);
> + opn = "v3mulu";
> + break;
> + }
> + }
> +
> + } else {
> + generate_exception(ctx, EXCP_RI);
> + }
> +}
> +
> +
> +#endif
> /* Arithmetic */
> static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
> int rd, int rs, int rt)
> {
> const char *opn = "arith";
>
> + target_ulong mask = 0xFF;
> if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB
> && opc != OPC_DADD && opc != OPC_DSUB) {
> /* If no destination, treat it as a NOP.
> @@ -1637,6 +1887,22 @@ static void gen_arith (CPUState *env, DisasContext
> *ctx, uint32_t opc,
> }
> opn = "addu";
> break;
> + case OPC_BADDU:
> + {
> + TCGv t0 = tcg_temp_new();
> + TCGv t1 = tcg_temp_new();
> + TCGv t2 = tcg_temp_new();
> + gen_load_gpr(t1, rs);
> + gen_load_gpr(t2, rt);
> + tcg_gen_andi_tl(t1, t1, mask);
> + tcg_gen_andi_tl(t2, t2, mask);
> + tcg_gen_add_tl(t0, t1, t2);
> + tcg_gen_andi_tl(t0, t0, mask);
> + gen_store_gpr(t0, rd);
> + }
> +
> + opn = "baddu";
> + break;
> case OPC_SUB:
> {
> TCGv t0 = tcg_temp_local_new();
> @@ -2013,7 +2279,74 @@ static void gen_HILO (DisasContext *ctx, uint32_t
> opc, int reg)
> (void)opn; /* avoid a compiler warning */
> MIPS_DEBUG("%s %s", opn, regnames[reg]);
> }
> +#if defined(TARGET_MIPS64)
> +static void gen_seqsne (DisasContext *ctx, uint32_t opc,
> + int rd, int rs, int rt)
> +{
> + const char *opn = "seq/sne";
> + TCGv t0, t1;
> + t0 = tcg_temp_new();
> + t1 = tcg_temp_new();
> + switch (opc) {
> + case OPC_SEQ:
> + {
> + tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
> + gen_load_gpr(t0, rd);
> + tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rd], t0, 1);
> + }
> + opn = "seq";
> + break;
> + case OPC_SNE:
> + {
> + tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
> + gen_load_gpr(t0, rd);
> + gen_load_gpr(t1, 0);
> + tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t1, t0);
> + }
> + opn = "sne";
> + break;
> + default:
> + MIPS_INVAL(opn);
> + generate_exception(ctx, EXCP_RI);
> + goto out;
> + }
> +out:
> + tcg_temp_free(t0);
> + tcg_temp_free(t1);
>
> +}
> +
> +static void gen_saa (CPUState *env, DisasContext *ctx, uint32_t opc,
> + int rt, int base)
> +{
> + const char *opn = "saa";
> + TCGv t0, t1, temp;
> + t0 = tcg_temp_new();
> + t1 = tcg_temp_new();
> + temp = tcg_temp_new();
> + gen_load_gpr(t1, rt);
> + gen_base_offset_addr(ctx, t0, base, 0);
> + switch (opc) {
> + case OPC_SAA:
> + save_cpu_state(ctx, 1);
> + op_ld_lw(temp, t0, ctx);
> + tcg_gen_add_tl(temp, temp, t1);
> + op_st_sw(temp, t0, ctx);
> + opn = "saa";
> + break;
> + case OPC_SAAD:
> + save_cpu_state(ctx, 0);
> + op_ld_ld(temp, t0, ctx);
> + tcg_gen_add_tl(temp, temp, t1);
> + op_st_sd(temp, t0, ctx);
> + opn = "saad";
> + break;
> + }
> +
> + tcg_temp_free(t0);
> + tcg_temp_free(t1);
> +}
> +#endif
> static void gen_muldiv (DisasContext *ctx, uint32_t opc,
> int rs, int rt)
> {
> @@ -2149,6 +2482,10 @@ static void gen_muldiv (DisasContext *ctx, uint32_t
> opc,
> gen_helper_dmult(t0, t1);
> opn = "dmult";
> break;
> + case OPC_DMUL:
> + gen_helper_dmult(t0, t1);
> + opn = "dmul";
> + break;
> case OPC_DMULTU:
> gen_helper_dmultu(t0, t1);
> opn = "dmultu";
> @@ -2368,7 +2705,49 @@ static void gen_cl (DisasContext *ctx, uint32_t opc,
> MIPS_DEBUG("%s %s, %s", opn, regnames[rd], regnames[rs]);
> tcg_temp_free(t0);
> }
> +#if defined(TARGET_MIPS64)
> +static void insn_opc_pop (DisasContext *ctx, CPUState *env, uint32_t opc,
> + int rd, int rs, int rt)
> +{
> + TCGv num = tcg_temp_new();
> + TCGv res = tcg_temp_new();
> + target_ulong maskb = 1;
> + gen_load_gpr(num, rs);
> + TCGv ones = tcg_temp_new();
> + gen_load_gpr(ones, 0);
> + int x=1;
> + tcg_gen_andi_tl(res, num, maskb);
> + tcg_gen_add_tl(ones,ones, res);
> + while (x <= 31) {
> + tcg_gen_shri_i64(num, num, 1);
> + tcg_gen_andi_tl(res, num, maskb);
> + tcg_gen_add_tl(ones, ones, res);
> + x++;
> + }
> + gen_store_gpr(ones, rd);
> +}
> +static void insn_opc_dpop (DisasContext *ctx, CPUState *env, uint32_t opc,
> + int rd, int rs, int rt)
> +{
> + TCGv num, res, ones;
> + num = tcg_temp_new();
> + res = tcg_temp_new();
> + ones = tcg_temp_new();
> + target_ulong maskb = 1;
> + gen_load_gpr(num, rs);
> + int x = 1;
> + tcg_gen_andi_tl(res, num, maskb);
> + tcg_gen_mov_tl(ones, res);
>
> + while (x <= 63) {
> + tcg_gen_shri_i64(num, num, 1);
> + tcg_gen_andi_tl(res, num, maskb);
> + tcg_gen_add_tl(ones, ones, res);
> + x++;
> + }
> + gen_store_gpr(ones, rd);
> +}
> +#endif
> /* Godson integer instructions */
> static void gen_loongson_integer (DisasContext *ctx, uint32_t opc,
> int rd, int rs, int rt)
> @@ -2705,6 +3084,7 @@ static void gen_compute_branch (DisasContext *ctx,
> uint32_t opc,
> target_ulong btgt = -1;
> int blink = 0;
> int bcond_compute = 0;
> + target_ulong maskb; /* Used in BBIT0 and BBIT1*/
> TCGv t0 = tcg_temp_new();
> TCGv t1 = tcg_temp_new();
>
> @@ -2730,6 +3110,39 @@ static void gen_compute_branch (DisasContext *ctx,
> uint32_t opc,
> }
> btgt = ctx->pc + insn_bytes + offset;
> break;
> + case OPC_BBIT1:
> + gen_load_gpr(t0, rs);
> + gen_load_gpr(t1, 0);
> + maskb = 1ULL << rt;
> + tcg_gen_andi_tl(t0, t0, maskb);
> + bcond_compute = 1;
> + btgt = ctx->pc + insn_bytes + offset;
> + break;
> + case OPC_BBIT132:
> + gen_load_gpr(t0, rs);
> + gen_load_gpr(t1, 0);
> + maskb = 1ULL << (rt + 32);
> + tcg_gen_andi_tl(t0, t0, maskb);
> + bcond_compute = 1;
> + btgt = ctx->pc + insn_bytes + offset;
> + break;
> + case OPC_BBIT0:
> + gen_load_gpr(t0, rs);
> + gen_load_gpr(t1, 0);
> + maskb = 1ULL << rt;
> + tcg_gen_andi_tl(t0, t0, maskb);
> + bcond_compute = 1;
> + btgt = ctx->pc + insn_bytes + offset;
> + break;
> + case OPC_BBIT032:
> + gen_load_gpr(t0, rs);
> + gen_load_gpr(t1, 0);
> + maskb = 1ULL << (rt + 32);
> + tcg_gen_andi_tl(t0, t0, maskb);
> + bcond_compute = 1;
> + btgt = ctx->pc + insn_bytes + offset;
> + break;
> +
> case OPC_BGEZ:
> case OPC_BGEZAL:
> case OPC_BGEZALS:
> @@ -2888,6 +3301,18 @@ static void gen_compute_branch (DisasContext *ctx,
> uint32_t opc,
> MIPS_DEBUG("bne %s, %s, " TARGET_FMT_lx,
> regnames[rs], regnames[rt], btgt);
> goto not_likely;
> + case OPC_BBIT1:
> + tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
> + goto not_likely;
> + case OPC_BBIT132:
> + tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
> + goto not_likely;
> + case OPC_BBIT0:
> + tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
> + goto not_likely;
> + case OPC_BBIT032:
> + tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
> + goto not_likely;
> case OPC_BNEL:
> tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
> MIPS_DEBUG("bnel %s, %s, " TARGET_FMT_lx,
> @@ -2983,7 +3408,44 @@ static void gen_compute_branch (DisasContext *ctx,
> uint32_t opc,
> tcg_temp_free(t0);
> tcg_temp_free(t1);
> }
> +/*For cavium specific extract instructions*/
> +#if defined(TARGET_MIPS64)
> +static void gen_exts (CPUState *env,DisasContext *ctx, uint32_t opc, int
> rt,
> + int rs, int lsb, int msb)
> +{
> + TCGv t0 = tcg_temp_new();
> + TCGv t1 = tcg_temp_new();
> + target_ulong mask;
> + gen_load_gpr(t1, rs);
> + switch (opc) {
> + case OPC_EXTS:
> + tcg_gen_shri_tl(t0, t1, lsb);
> + tcg_gen_andi_tl(t0, t0, (1ULL << (msb + 1)) - 1);
> + /* To sign extend the remaining bits according to
> + the msb of the bit field */
> + mask = 1ULL << msb;
> + tcg_gen_andi_tl(t1, t0, mask);
> + tcg_gen_addi_tl(t1, t1, -1);
> + tcg_gen_not_i64(t1, t1);
> + tcg_gen_or_tl(t0, t0, t1);
> + gen_store_gpr(t0, rt);
> + break;
> + case OPC_EXTS32:
> + tcg_gen_shri_tl(t0, t1, lsb + 32);
> + tcg_gen_andi_tl(t0, t0, (1ULL << (msb + 1)) - 1);
> + mask = 1ULL << msb;
> + tcg_gen_andi_tl(t1, t0, mask);
> + tcg_gen_addi_tl(t1, t1, -1);
> + tcg_gen_not_i64(t1, t1);
> + tcg_gen_or_tl(t0, t0, t1);
> + gen_store_gpr(t0, rt);
> + break;
>
> + }
> + tcg_temp_free(t0);
> + tcg_temp_free(t1);
> +}
> +#endif
> /* special3 bitfield operations */
> static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt,
> int rs, int lsb, int msb)
> @@ -3063,6 +3525,22 @@ static void gen_bitops (DisasContext *ctx, uint32_t
> opc, int rt,
> tcg_gen_andi_tl(t1, t1, mask);
> tcg_gen_or_tl(t0, t0, t1);
> break;
> + case OPC_CINS:
> + mask = (1ULL << (msb+1))-1;
> + gen_load_gpr(t0, rt);
> + tcg_gen_andi_tl(t0, t0, 0);
> + tcg_gen_andi_tl(t1, t1, mask);
> + tcg_gen_shli_tl(t1, t1, lsb);
> + tcg_gen_or_tl(t0, t0, t1);
> + break;
> + case OPC_CINS32:
> + mask = (1ULL << (msb+1))-1;
> + gen_load_gpr(t0, rt);
> + tcg_gen_andi_tl(t0, t0, 0);
> + tcg_gen_andi_tl(t1, t1, mask);
> + tcg_gen_shli_tl(t1, t1, (lsb+32));
> + tcg_gen_or_tl(t0, t0, t1);
> + break;
> #endif
> default:
> fail:
> @@ -11605,7 +12083,7 @@ static void decode_opc (CPUState *env, DisasContext
> *ctx, int *is_branch)
> int32_t offset;
> int rs, rt, rd, sa;
> uint32_t op, op1, op2;
> - int16_t imm;
> + int16_t imm, imm10;
>
> /* make sure instructions are on a word boundary */
> if (ctx->pc & 0x3) {
> @@ -11634,6 +12112,9 @@ static void decode_opc (CPUState *env, DisasContext
> *ctx, int *is_branch)
> rd = (ctx->opcode >> 11) & 0x1f;
> sa = (ctx->opcode >> 6) & 0x1f;
> imm = (int16_t)ctx->opcode;
> + /* 10 bit Immediate value For SEQI,SNEI */
> + imm10 = (ctx->opcode >> 6) & 0x3ff;
> +
> switch (op) {
> case OPC_SPECIAL:
> op1 = MASK_SPECIAL(ctx->opcode);
> @@ -11859,6 +12340,71 @@ static void decode_opc (CPUState *env, DisasContext
> *ctx, int *is_branch)
> case OPC_MUL:
> gen_arith(env, ctx, op1, rd, rs, rt);
> break;
> +#if defined(TARGET_MIPS64)
> +
> +
> + case OPC_DMUL:
> + check_insn(env, ctx, ISA_MIPS3);
> + check_mips_64(ctx);
> + gen_muldiv(ctx, op1, rs, rt);
> + tcg_gen_mov_tl(cpu_gpr[rd], cpu_LO[0]);
> + break;
> + case OPC_CINS:
> + check_insn(env, ctx, ISA_MIPS64R2);
> + check_mips_64(ctx);
> + gen_bitops(ctx, op1, rt, rs, sa, rd);
> + break;
> + case OPC_CINS32:
> + check_mips_64(ctx);
> + gen_bitops(ctx, op1, rt, rs, sa, rd);
> + break;
> + case OPC_MTM0:
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + break;
> + case OPC_MTM1:
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + break;
> + case OPC_MTM2:
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + break;
> + case OPC_MTP0:
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + break;
> + case OPC_MTP1:
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + break;
> + case OPC_MTP2:
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + break;
> + case OPC_VMULU:
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + break;
> + case OPC_BADDU:
> + gen_arith(env, ctx, op1, rd, rs, rt);
> + break;
> + case OPC_EXTS:
> + check_mips_64(ctx);
> + gen_exts(env, ctx, op1, rt, rs, sa, rd);
> + break;
> + case OPC_EXTS32:
> + check_mips_64(ctx);
> + gen_exts(env, ctx, op1, rt, rs, sa, rd);
> + break;
> + case OPC_SAA:
> + gen_saa(env, ctx, op1, rt, rs);
> + break;
> + case OPC_SAAD:
> + check_mips_64(ctx);
> + gen_saa(env, ctx, op1, rt, rs);
> + break;
> +#endif
> case OPC_CLO:
> case OPC_CLZ:
> check_insn(env, ctx, ISA_MIPS32);
> @@ -11878,13 +12424,24 @@ static void decode_opc (CPUState *env,
> DisasContext *ctx, int *is_branch)
> break;
> case OPC_DIV_G_2F:
> case OPC_DIVU_G_2F:
> - case OPC_MULT_G_2F:
> case OPC_MULTU_G_2F:
> case OPC_MOD_G_2F:
> case OPC_MODU_G_2F:
> check_insn(env, ctx, INSN_LOONGSON2F);
> gen_loongson_integer(ctx, op1, rd, rs, rt);
> break;
> + case OPC_MULT_G_2F:
> + if (!TARGET_OCTEON) {
> + check_insn(env, ctx, INSN_LOONGSON2F);
> + gen_loongson_integer(ctx, op1, rd, rs, rt);
> + } else {
> +#if defined(TARGET_MIPS64)
> + /* Cavium Specific vmm0 */
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> +#endif
> + }
> + break;
> #if defined(TARGET_MIPS64)
> case OPC_DCLO:
> case OPC_DCLZ:
> @@ -11892,7 +12449,6 @@ static void decode_opc (CPUState *env, DisasContext
> *ctx, int *is_branch)
> check_mips_64(ctx);
> gen_cl(ctx, op1, rd, rs);
> break;
> - case OPC_DMULT_G_2F:
> case OPC_DMULTU_G_2F:
> case OPC_DDIV_G_2F:
> case OPC_DDIVU_G_2F:
> @@ -11901,6 +12457,39 @@ static void decode_opc (CPUState *env, DisasContext
> *ctx, int *is_branch)
> check_insn(env, ctx, INSN_LOONGSON2F);
> gen_loongson_integer(ctx, op1, rd, rs, rt);
> break;
> + case OPC_DMULT_G_2F:
> + if (!TARGET_OCTEON) {
> + check_insn(env, ctx, INSN_LOONGSON2F);
> + gen_loongson_integer(ctx, op1, rd, rs, rt);
> + } else {
> + /* Cavium Specific instruction v3mulu */
> + check_mips_64(ctx);
> + gen_LMI(env, ctx, op1, rs, rt, rd);
> + }
> + break;
> + case OPC_SEQ:
> + check_mips_64(ctx);
> + gen_seqsne(ctx, op1, rd, rs, rt);
> + break;
> + case OPC_SNE:
> + check_mips_64(ctx);
> + gen_seqsne(ctx, op1, rd, rs, rt);
> + break;
> + case OPC_SEQI:
> + check_mips_64(ctx);
> + gen_set_imm(env, op1, rt, rs, imm10);
> + break;
> + case OPC_SNEI:
> + check_mips_64(ctx);
> + gen_set_imm(env, op1, rt, rs, imm10);
> + break;
> + case OPC_POP:
> + insn_opc_pop(ctx, env, op1, rd, rs, rt);
> + break;
> + case OPC_DPOP:
> + check_mips_64(ctx);
> + insn_opc_dpop(ctx, env, op1, rd, rs, rt);
> + break;
> #endif
> default: /* Invalid */
> MIPS_INVAL("special2");
> @@ -12192,10 +12781,32 @@ static void decode_opc (CPUState *env,
> DisasContext *ctx, int *is_branch)
> break;
>
> /* COP2. */
> - case OPC_LWC2:
> - case OPC_LDC2:
> - case OPC_SWC2:
> - case OPC_SDC2:
> + /* Conflicting opcodes with Cavium specific branch instructions
> + if TARGET_OCTEON is set these opcodes will belong to Cavium */
> + case OPC_LWC2: /*BBIT0*/
> + if(TARGET_OCTEON) {
> + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2);
> + *is_branch = 1;
> + break;
> + }
> + case OPC_LDC2: /*BBIT032*/
> + if(TARGET_OCTEON) {
> + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2);
> + *is_branch = 1;
> + break;
> + }
> + case OPC_SWC2: /*BBIT1*/
> + if(TARGET_OCTEON) {
> + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2);
> + *is_branch = 1;
> + break;
> + }
> + case OPC_SDC2: /*BBIT132*/
> + if(TARGET_OCTEON) {
> + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2);
> + *is_branch = 1;
> + break;
> + }
> case OPC_CP2:
> /* COP2: Not implemented. */
> generate_exception_err(ctx, EXCP_CpU, 2);
> @@ -12584,6 +13195,18 @@ static void mips_tcg_init(void)
> cpu_dspctrl = tcg_global_mem_new(TCG_AREG0,
> offsetof(CPUState,
> active_tc.DSPControl),
> "DSPControl");
> + mpl0 = tcg_global_mem_new(TCG_AREG0,
> + offsetof(CPUState, Reg.MPL0), "MPL0");
> + mpl1 = tcg_global_mem_new(TCG_AREG0,
> + offsetof(CPUState, Reg.MPL1), "MPL1");
> + mpl2 = tcg_global_mem_new(TCG_AREG0,
> + offsetof(CPUState, Reg.MPL2), "MPL2");
> + p0 = tcg_global_mem_new(TCG_AREG0,
> + offsetof(CPUState, Reg.P0), "P0");
> + p1 = tcg_global_mem_new(TCG_AREG0,
> + offsetof(CPUState, Reg.P1), "P1");
> + p2 = tcg_global_mem_new(TCG_AREG0,
> + offsetof(CPUState, Reg.P2), "P2");
> bcond = tcg_global_mem_new(TCG_AREG0,
> offsetof(CPUState, bcond), "bcond");
> btarget = tcg_global_mem_new(TCG_AREG0,
> @@ -12607,6 +13230,18 @@ static void mips_tcg_init(void)
>
> #include "translate_init.c"
>
> +#if defined(TARGET_MIPS64)
> +
> +static void set_cvmctl_register(CPUMIPSState *env)
> +{
> + env->CvmCtlRegister.cvmctl = env->CvmCtlRegister.cvmctl
> + ^ env->CvmCtlRegister.cvmctl;
> + env->CvmCtlRegister.cvmctl =
> FUSE_START_BIT(env->CvmCtlRegister.cvmctl);
> + env->CvmCtlRegister.cvmctl = KASUMI(env->CvmCtlRegister.cvmctl);
> + env->CvmCtlRegister.cvmctl = IPPCI(env->CvmCtlRegister.cvmctl);
> + env->CvmCtlRegister.cvmctl = IPTI(env->CvmCtlRegister.cvmctl);
> +}
> +#endif
> CPUMIPSState *cpu_mips_init (const char *cpu_model)
> {
> CPUMIPSState *env;
> @@ -12619,6 +13254,10 @@ CPUMIPSState *cpu_mips_init (const char *cpu_model)
> env->cpu_model = def;
> env->cpu_model_str = cpu_model;
>
> +#if defined(TARGET_MIPS64)
> + /*Function for setting cvmctl register*/
> + set_cvmctl_register(env);
> +#endif
> cpu_exec_init(env);
> #ifndef CONFIG_USER_ONLY
> mmu_init(env, def);
> --
> 1.7.3.4