[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [patch] Various (mostly) x86-64 related patches
From: Filip Navara
Subject: Re: [Qemu-devel] [patch] Various (mostly) x86-64 related patches
Date: Thu, 21 Jul 2005 12:36:13 +0200
User-agent: Mozilla Thunderbird 0.9 (Windows/20041103)
Filip Navara wrote:
[snip]
qemu-20050718-02-insns.patch
- Add support for the AMD cache info CPUID leaves (0x80000005/0x80000006).
- Fix INVLPG instruction in 64bit mode.
- Implement the ENTER instruction for 64bit mode.
- Add dummy support for MCA, PAT, MTRR and CLFLUSH.
- Correct size of 16bit stack pushes in 64bit mode.
- Correct segment arithmetic for various instructions in 64bit
mode.
- Correctly honour the ADDR prefix for these instructions:
maskmov; mov EAX, Ov; mov Ov, EAX
- Fix the implicit 64bit semantics for these instructions:
ret im; lcall; lret
- Support the PREFETCHW instruction.
This time with the correct patch (qemu-20050721-02-insns.patch).
qemu-20050718-03-apic.patch
[snip]
I forgot to credit malc_ for his extensive testing, ideas and general
help with this patch. Thanks!
- Filip
Index: target-i386/cpu.h
===================================================================
RCS file: /cvsroot/qemu/qemu/target-i386/cpu.h,v
retrieving revision 1.30
diff -u -r1.30 cpu.h
--- target-i386/cpu.h 23 Apr 2005 17:46:55 -0000 1.30
+++ target-i386/cpu.h 21 Jul 2005 09:57:34 -0000
@@ -214,6 +214,12 @@
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
+#define MSR_MCG_CAP 0x179
+#define MSR_MCG_STATUS 0x17a
+#define MSR_MCG_CTL 0x17b
+
+#define MSR_PAT 0x277
+
#define MSR_EFER 0xc0000080
#define MSR_EFER_SCE (1 << 0)
@@ -231,26 +237,29 @@
#define MSR_KERNELGSBASE 0xc0000102
/* cpuid_features bits */
-#define CPUID_FP87 (1 << 0)
-#define CPUID_VME (1 << 1)
-#define CPUID_DE (1 << 2)
-#define CPUID_PSE (1 << 3)
-#define CPUID_TSC (1 << 4)
-#define CPUID_MSR (1 << 5)
-#define CPUID_PAE (1 << 6)
-#define CPUID_MCE (1 << 7)
-#define CPUID_CX8 (1 << 8)
-#define CPUID_APIC (1 << 9)
-#define CPUID_SEP (1 << 11) /* sysenter/sysexit */
-#define CPUID_MTRR (1 << 12)
-#define CPUID_PGE (1 << 13)
-#define CPUID_MCA (1 << 14)
-#define CPUID_CMOV (1 << 15)
+#define CPUID_FP87 (1 << 0)
+#define CPUID_VME (1 << 1)
+#define CPUID_DE (1 << 2)
+#define CPUID_PSE (1 << 3)
+#define CPUID_TSC (1 << 4)
+#define CPUID_MSR (1 << 5)
+#define CPUID_PAE (1 << 6)
+#define CPUID_MCE (1 << 7)
+#define CPUID_CX8 (1 << 8)
+#define CPUID_APIC (1 << 9)
+#define CPUID_SEP (1 << 11) /* sysenter/sysexit */
+#define CPUID_MTRR (1 << 12)
+#define CPUID_PGE (1 << 13)
+#define CPUID_MCA (1 << 14)
+#define CPUID_CMOV (1 << 15)
+#define CPUID_PAT (1 << 16)
/* ... */
-#define CPUID_MMX (1 << 23)
-#define CPUID_FXSR (1 << 24)
-#define CPUID_SSE (1 << 25)
-#define CPUID_SSE2 (1 << 26)
+#define CPUID_CLFLUSH (1 << 19)
+/* ... */
+#define CPUID_MMX (1 << 23)
+#define CPUID_FXSR (1 << 24)
+#define CPUID_SSE (1 << 25)
+#define CPUID_SSE2 (1 << 26)
#define CPUID_EXT_SS3 (1 << 0)
#define CPUID_EXT_MONITOR (1 << 3)
@@ -473,6 +482,8 @@
target_ulong fmask;
target_ulong kernelgsbase;
#endif
+
+ uint64_t pat;
/* temporary data for USE_CODE_COPY mode */
#ifdef USE_CODE_COPY
Index: target-i386/exec.h
===================================================================
RCS file: /cvsroot/qemu/qemu/target-i386/exec.h,v
retrieving revision 1.24
diff -u -r1.24 exec.h
--- target-i386/exec.h 20 Mar 2005 10:39:24 -0000 1.24
+++ target-i386/exec.h 21 Jul 2005 09:57:34 -0000
@@ -157,11 +157,11 @@
void helper_ltr_T0(void);
void helper_movl_crN_T0(int reg);
void helper_movl_drN_T0(int reg);
-void helper_invlpg(unsigned int addr);
+void helper_invlpg(target_ulong addr);
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
-void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
+void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr);
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write, int is_user, int is_softmmu);
void tlb_fill(target_ulong addr, int is_write, int is_user,
Index: target-i386/helper.c
===================================================================
RCS file: /cvsroot/qemu/qemu/target-i386/helper.c,v
retrieving revision 1.49
diff -u -r1.49 helper.c
--- target-i386/helper.c 24 Apr 2005 18:04:33 -0000 1.49
+++ target-i386/helper.c 21 Jul 2005 09:57:34 -0000
@@ -1334,6 +1334,20 @@
ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
break;
+ case 0x80000005:
+ /* cache info (L1 cache) */
+ EAX = 0x01ff01ff;
+ EBX = 0x01ff01ff;
+ ECX = 0x40020140;
+ EDX = 0x40020140;
+ break;
+ case 0x80000006:
+ /* cache info (L2 cache) */
+ EAX = 0;
+ EBX = 0x42004200;
+ ECX = 0x02008140;
+ EDX = 0;
+ break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
EAX = 0x00003028;
@@ -1354,12 +1368,26 @@
void helper_enter_level(int level, int data32)
{
target_ulong ssp;
- uint32_t esp_mask, esp, ebp;
+ uint32_t esp_mask;
+ target_ulong esp, ebp;
esp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
ebp = EBP;
esp = ESP;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ /* 64 bit */
+ esp -= 8;
+ while (--level) {
+ esp -= 8;
+ ebp -= 8;
+ stq(ssp + esp, ldq(ssp + ebp));
+ }
+ esp -= 8;
+ stq(ssp + esp, T1);
+ } else
+#endif
if (data32) {
/* 32 bit */
esp -= 4;
@@ -2271,7 +2299,7 @@
env->dr[reg] = T0;
}
-void helper_invlpg(unsigned int addr)
+void helper_invlpg(target_ulong addr)
{
cpu_x86_flush_tlb(env, addr);
}
@@ -2332,6 +2360,9 @@
case MSR_STAR:
env->star = val;
break;
+ case MSR_PAT:
+ env->pat = val;
+ break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
env->lstar = val;
@@ -2379,6 +2410,9 @@
break;
case MSR_STAR:
val = env->star;
+ break;
+ case MSR_PAT:
+ val = env->pat;
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
Index: target-i386/helper2.c
===================================================================
RCS file: /cvsroot/qemu/qemu/target-i386/helper2.c,v
retrieving revision 1.34
diff -u -r1.34 helper2.c
--- target-i386/helper2.c 3 Jul 2005 21:29:17 -0000 1.34
+++ target-i386/helper2.c 21 Jul 2005 09:57:34 -0000
@@ -106,7 +106,9 @@
env->cpuid_version = (family << 8) | (model << 4) | stepping;
env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
CPUID_TSC | CPUID_MSR | CPUID_MCE |
- CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
+ CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
+ CPUID_PAT);
+ env->pat = 0x0007040600070406ULL;
env->cpuid_ext_features = 0;
env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2
| CPUID_PAE | CPUID_SEP;
env->cpuid_xlevel = 0;
@@ -128,6 +130,9 @@
env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
env->cpuid_xlevel = 0x80000008;
+
+ /* these features are needed for Win64 and aren't fully implemented */
+ env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
}
cpu_single_env = env;
@@ -546,7 +551,7 @@
}
/* XXX: also flush 4MB pages */
-void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
+void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
tlb_flush_page(env, addr);
}
Index: target-i386/op.c
===================================================================
RCS file: /cvsroot/qemu/qemu/target-i386/op.c,v
retrieving revision 1.37
diff -u -r1.37 op.c
--- target-i386/op.c 26 Apr 2005 20:38:17 -0000 1.37
+++ target-i386/op.c 21 Jul 2005 09:57:34 -0000
@@ -898,6 +898,11 @@
}
#ifdef TARGET_X86_64
+void op_subq_A0_2(void)
+{
+ A0 -= 2;
+}
+
void op_subq_A0_8(void)
{
A0 -= 8;
Index: target-i386/translate.c
===================================================================
RCS file: /cvsroot/qemu/qemu/target-i386/translate.c,v
retrieving revision 1.49
diff -u -r1.49 translate.c
--- target-i386/translate.c 23 Apr 2005 17:53:12 -0000 1.49
+++ target-i386/translate.c 21 Jul 2005 10:05:13 -0000
@@ -1604,7 +1604,14 @@
else
override = R_DS;
}
- gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
+ gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base));
+ } else
+#endif
+ {
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
}
}
@@ -1627,7 +1634,14 @@
override = R_DS;
}
if (must_add_seg) {
- gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
+ gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base));
+ } else
+#endif
+ {
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
}
}
@@ -1948,10 +1962,14 @@
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
- /* XXX: check 16 bit behaviour */
gen_op_movq_A0_reg[R_ESP]();
- gen_op_subq_A0_8();
- gen_op_st_T0_A0[OT_QUAD + s->mem_index]();
+ if (s->dflag) {
+ gen_op_subq_A0_8();
+ gen_op_st_T0_A0[OT_QUAD + s->mem_index]();
+ } else {
+ gen_op_subq_A0_2();
+ gen_op_st_T0_A0[OT_WORD + s->mem_index]();
+ }
gen_op_movq_ESP_A0();
} else
#endif
@@ -1985,10 +2003,14 @@
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
- /* XXX: check 16 bit behaviour */
gen_op_movq_A0_reg[R_ESP]();
- gen_op_subq_A0_8();
- gen_op_st_T1_A0[OT_QUAD + s->mem_index]();
+ if (s->dflag) {
+ gen_op_subq_A0_8();
+ gen_op_st_T1_A0[OT_QUAD + s->mem_index]();
+ } else {
+ gen_op_subq_A0_2();
+ gen_op_st_T1_A0[OT_WORD + s->mem_index]();
+ }
gen_op_movq_ESP_A0();
} else
#endif
@@ -2020,9 +2042,8 @@
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
- /* XXX: check 16 bit behaviour */
gen_op_movq_A0_reg[R_ESP]();
- gen_op_ld_T0_A0[OT_QUAD + s->mem_index]();
+ gen_op_ld_T0_A0[(s->dflag ? OT_QUAD : OT_WORD) + s->mem_index]();
} else
#endif
{
@@ -2041,7 +2062,7 @@
static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
- if (CODE64(s)) {
+ if (CODE64(s) && s->dflag) {
gen_stack_update(s, 8);
} else
#endif
@@ -2105,26 +2126,38 @@
{
int ot, opsize;
- ot = s->dflag + OT_WORD;
+ if (CODE64(s)) {
+ ot = OT_QUAD;
+ opsize = 8;
+ } else {
+ ot = s->ss32 + OT_WORD;
+ opsize = 2 << s->ss32;
+ }
level &= 0x1f;
- opsize = 2 << s->dflag;
- gen_op_movl_A0_ESP();
- gen_op_addl_A0_im(-opsize);
- if (!s->ss32)
- gen_op_andl_A0_ffff();
- gen_op_movl_T1_A0();
- if (s->addseg)
- gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+ if (CODE64(s)) {
+ gen_op_movq_A0_ESP();
+ gen_op_addq_A0_im(-opsize);
+ if (s->addseg)
+ gen_op_addq_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+ } else {
+ gen_op_movl_A0_ESP();
+ gen_op_addl_A0_im(-opsize);
+ if (!s->ss32)
+ gen_op_andl_A0_ffff();
+ gen_op_movl_T1_A0();
+ if (s->addseg)
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+ }
/* push bp */
- gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
+ gen_op_mov_TN_reg[ot][0][R_EBP]();
gen_op_st_T0_A0[ot + s->mem_index]();
if (level) {
gen_op_enter_level(level, s->dflag);
}
gen_op_mov_reg_T1[ot][R_EBP]();
gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
- gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
+ gen_op_mov_reg_T1[ot][R_ESP]();
}
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
@@ -2901,7 +2934,7 @@
if (mod != 3)
goto illegal_op;
#ifdef TARGET_X86_64
- if (CODE64(s)) {
+ if (s->aflag == 2) {
gen_op_movq_A0_reg[R_EDI]();
} else
#endif
@@ -3442,6 +3475,8 @@
gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
gen_op_ldu_T0_A0[OT_WORD + s->mem_index]();
do_lcall:
+ if (CODE64(s) && s->dflag)
+ s->dflag = 2;
if (s->pe && !s->vm86) {
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -3635,7 +3670,12 @@
/**************************/
/* push/pop */
case 0x50 ... 0x57: /* push */
- gen_op_mov_TN_reg[OT_LONG][0][(b & 7) | REX_B(s)]();
+ if (CODE64(s)) {
+ ot = dflag ? OT_QUAD : OT_WORD;
+ } else {
+ ot = dflag + OT_WORD;
+ }
+ gen_op_mov_TN_reg[ot][0][(b & 7) | REX_B(s)]();
gen_push_T0(s);
break;
case 0x58 ... 0x5f: /* pop */
@@ -3697,7 +3737,6 @@
break;
case 0xc8: /* enter */
{
- /* XXX: long mode support */
int level;
val = lduw_code(s->pc);
s->pc += 2;
@@ -3707,7 +3746,6 @@
break;
case 0xc9: /* leave */
/* XXX: exception not precise (ESP is updated before potential
exception) */
- /* XXX: may be invalid for 16 bit in long mode */
if (CODE64(s)) {
gen_op_mov_TN_reg[OT_QUAD][0][R_EBP]();
gen_op_mov_reg_T0[OT_QUAD][R_ESP]();
@@ -3926,7 +3964,7 @@
else
ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
- if (CODE64(s)) {
+ if (s->aflag == 2) {
offset_addr = ldq_code(s->pc);
s->pc += 8;
if (offset_addr == (int32_t)offset_addr)
@@ -3955,7 +3993,7 @@
break;
case 0xd7: /* xlat */
#ifdef TARGET_X86_64
- if (CODE64(s)) {
+ if (s->aflag == 2) {
gen_op_movq_A0_reg[R_EBX]();
gen_op_addq_A0_AL();
} else
@@ -4779,6 +4817,8 @@
val = ldsw_code(s->pc);
s->pc += 2;
gen_pop_T0(s);
+ if (CODE64(s) && s->dflag)
+ s->dflag = 2;
gen_stack_update(s, val + (2 << s->dflag));
if (s->dflag == 0)
gen_op_andl_T0_ffff();
@@ -4801,6 +4841,8 @@
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
+ if (CODE64(s) && s->dflag)
+ s->dflag = 2;
gen_op_lret_protected(s->dflag, val);
} else {
gen_stack_A0(s);
@@ -5782,13 +5824,26 @@
break;
case 5: /* lfence */
case 6: /* mfence */
- case 7: /* sfence */
- if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
- goto illegal_op;
- break;
+ case 7: /* sfence / clflush */
+ if ((modrm & 0xc7) == 0xc0) {
+ if (!(s->cpuid_features & CPUID_SSE))
+ goto illegal_op;
+ break;
+ } else
+ if (op == 7) {
+ if (!(s->cpuid_features & CPUID_CLFLUSH))
+ goto illegal_op;
+ break;
+ }
+ /* fall through to default (illegal op) */
default:
goto illegal_op;
}
+ break;
+ case 0x10d:
+ modrm = ldub_code(s->pc++);
+ gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
+ /* ignore for now */
break;
case 0x110 ... 0x117:
case 0x128 ... 0x12f: