diff --git a/efiemu/runtime/efiemu.c b/efiemu/runtime/efiemu.c
index 5887c48..f73f843 100644
--- a/efiemu/runtime/efiemu.c
+++ b/efiemu/runtime/efiemu.c
@@ -162,8 +162,8 @@ static inline void
 write_cmos (grub_uint8_t addr, grub_uint8_t val)
 {
   __asm__ __volatile__ ("outb %%al,$0x70\n"
-			"mov %%bl, %%al\n"
-			"outb %%al,$0x71": :"a" (addr), "b" (val));
+			"mov %%cl, %%al\n"
+			"outb %%al,$0x71": :"a" (addr), "c" (val));
 }
 
 static inline grub_uint8_t
diff --git a/include/grub/i386/tsc.h b/include/grub/i386/tsc.h
index 9633701..aa40145 100644
--- a/include/grub/i386/tsc.h
+++ b/include/grub/i386/tsc.h
@@ -29,8 +29,24 @@ grub_get_tsc (void)
 
   /* The CPUID instruction is a 'serializing' instruction, and
      avoids out-of-order execution of the RDTSC instruction. */
+#ifdef APPLE_CC
   __asm__ __volatile__ ("xorl %%eax, %%eax\n\t"
-                        "cpuid":::"%rax", "%rbx", "%rcx", "%rdx");
+#ifdef __x86_64__
+                        "push %%rbx\n"
+#else
+                        "push %%ebx\n"
+#endif
+                        "cpuid\n"
+#ifdef __x86_64__
+                        "pop %%rbx\n"
+#else
+                        "pop %%ebx\n"
+#endif
+                        :::"%rax", "%rcx", "%rdx");
+#else
+  __asm__ __volatile__ ("xorl %%eax, %%eax\n\t"
+                        "cpuid":::"%rax", "%rbx", "%rcx", "%rdx");
+#endif
   /* Read TSC value. We cannot use "=A", since this would use
      %rax on x86_64. */
   __asm__ __volatile__ ("rdtsc":"=a" (lo), "=d" (hi));
@@ -93,11 +109,29 @@ grub_cpu_is_tsc_supported (void)
     return 0;
 
   grub_uint32_t features;
+#ifdef APPLE_CC
   __asm__ ("movl $1, %%eax\n\t"
-           "cpuid"
+#ifdef __x86_64__
+           "push %%rbx\n"
+#else
+           "push %%ebx\n"
+#endif
+           "cpuid\n"
+#ifdef __x86_64__
+           "pop %%rbx\n"
+#else
+           "pop %%ebx\n"
+#endif
+           : "=d" (features)
+           : /* No inputs. */
+           : /* Clobbered: */ "%rax", "%rcx");
+#else
+  __asm__ ("movl $1, %%eax\n\t"
+           "cpuid\n"
            : "=d" (features)
           : /* No inputs. */
           : /* Clobbered: */ "%rax", "%rbx", "%rcx");
+#endif
   return (features & (1 << 4)) != 0;
 }
 
diff --git a/loader/i386/xnu.c b/loader/i386/xnu.c
index 0860160..2fb490d 100644
--- a/loader/i386/xnu.c
+++ b/loader/i386/xnu.c
@@ -125,6 +125,28 @@ guessfsb (void)
   if (! grub_cpu_is_cpuid_supported ())
     return sane_value;
 
+
+#ifdef APPLE_CC
+  asm volatile ("movl $0, %%eax\n"
+#ifdef __x86_64__
+		"push %%rbx\n"
+#else
+		"push %%ebx\n"
+#endif
+		"cpuid\n"
+#ifdef __x86_64__
+		"pop %%rbx\n"
+#else
+		"pop %%ebx\n"
+#endif
+		: "=a" (max_cpuid),
+		  "=d" (manufacturer[1]), "=c" (manufacturer[2]));
+
+  /* Only Intel for now is done. */
+  if (grub_memcmp (manufacturer + 1, "ineIntel", 8) != 0)
+    return sane_value;
+
+#else
   asm volatile ("movl $0, %%eax\n"
		"cpuid"
		: "=a" (max_cpuid), "=b" (manufacturer[0]),
@@ -133,15 +155,33 @@ guessfsb (void)
   /* Only Intel for now is done. */
   if (grub_memcmp (manufacturer, "GenuineIntel", 12) != 0)
     return sane_value;
+#endif
 
   /* Check Speedstep. */
   if (max_cpuid < 1)
     return sane_value;
 
+#ifdef APPLE_CC
+  asm volatile ("movl $1, %%eax\n"
+#ifdef __x86_64__
+		"push %%rbx\n"
+#else
+		"push %%ebx\n"
+#endif
+		"cpuid\n"
+#ifdef __x86_64__
+		"pop %%rbx\n"
+#else
+		"pop %%ebx\n"
+#endif
+		: "=c" (capabilities):
+		: "%rax", "%rdx");
+#else
   asm volatile ("movl $1, %%eax\n"
		"cpuid"
		: "=c" (capabilities):
-		: "%eax", "%ebx", "%edx");
+		: "%rax", "%rbx", "%rdx");
+#endif
 
   if (! (capabilities & (1 << 7)))
     return sane_value;
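
For reference, the APPLE_CC branches above all follow the same pattern: Apple's
assembler reserves %ebx (or %rbx) as the PIC base register, so the register may
not appear in an asm clobber list; instead it is saved and restored by hand
around CPUID. Below is a minimal standalone sketch of that pattern, not part of
the patch, assuming a hosted compiler with <stdint.h>; the function name
cpuid_features is illustrative only.

    #include <stdint.h>

    /* Return the CPUID leaf 1 feature flags (EDX) without naming
       %ebx/%rbx in the clobber list; the register is preserved
       manually around CPUID instead.  */
    static inline uint32_t
    cpuid_features (void)
    {
      uint32_t edx;

      __asm__ __volatile__ ("movl $1, %%eax\n\t"
    #ifdef __x86_64__
                            "push %%rbx\n\t"
    #else
                            "push %%ebx\n\t"
    #endif
                            "cpuid\n\t"
    #ifdef __x86_64__
                            "pop %%rbx\n\t"
    #else
                            "pop %%ebx\n\t"
    #endif
                            : "=d" (edx)
                            : /* No inputs. */
                            : "%eax", "%ecx"); /* %ebx preserved by hand. */
      return edx;
    }

This is the same effect the patch achieves by dropping %rbx from the clobber
lists in tsc.h and xnu.c, and, in write_cmos, by moving the operand from the
"b" constraint to "c" so %ebx is never touched at all.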