/*
 *  MMU implementation using indirection table (map)
 *
 *  Copyright (c) 2004 Piotr Krysiuk
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#if DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#else
#error unsupported data size
#endif

#if ACCESS_TYPE == 0
#define CPU_MEM_INDEX 0
#elif ACCESS_TYPE == 1
#define CPU_MEM_INDEX 1
#else
#error invalid ACCESS_TYPE
#endif

#define RES_TYPE int

#if defined(__i386__)

/*
 * Every access performs the same lookup: the page number of ptr indexes the
 * per-page displacement table (add_read or add_write) in env->mmu_map, and
 * the displacement is added to ptr to form the host address.
 */

static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
{
    RES_TYPE val;

    asm volatile (
        "mov %1, %%eax\n"
        "shr %2, %%eax\n"
        "mov %4 (%3, %%eax, 4), %%eax\n"
#if DATA_SIZE == 1
        "movzbl (%1, %%eax, 1), %0\n"
#elif DATA_SIZE == 2
        "movzwl (%1, %%eax, 1), %0\n"
#elif DATA_SIZE == 4
        "movl (%1, %%eax, 1), %0\n"
#else
#error unsupported size
#endif
        : "=r" (val)
        : "r" (ptr),
          "I" (TARGET_PAGE_BITS),
          "r" (env),
          "m" (*(char *)offsetof(CPUState, mmu_map[CPU_MEM_INDEX].add_read))
        : "%eax", "memory", "cc");
    return val;
}

#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
{
    int val;

    asm volatile (
        "mov %1, %%eax\n"
        "shr %2, %%eax\n"
        "mov %4 (%3, %%eax, 4), %%eax\n"
#if DATA_SIZE == 1
        "movsbl (%1, %%eax, 1), %0\n"
#elif DATA_SIZE == 2
        "movswl (%1, %%eax, 1), %0\n"
#else
#error unsupported size
#endif
        : "=r" (val)
        : "r" (ptr),
          "I" (TARGET_PAGE_BITS),
          "r" (env),
          "m" (*(char *)offsetof(CPUState, mmu_map[CPU_MEM_INDEX].add_read))
        : "%eax", "memory", "cc");
    return val;
}
#endif

static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE val)
{
    asm volatile (
        "mov %0, %%eax\n"
        "shr %2, %%eax\n"
        "mov %4 (%3, %%eax, 4), %%eax\n"
#if DATA_SIZE == 1
        "movb %b1, (%0, %%eax, 1)\n"
#elif DATA_SIZE == 2
        "movw %w1, (%0, %%eax, 1)\n"
#elif DATA_SIZE == 4
        "movl %1, (%0, %%eax, 1)\n"
#else
#error unsupported size
#endif
        :
        : "r" (ptr),
          "r" (val),
          "I" (TARGET_PAGE_BITS),
          "r" (env),
          "m" (*(char *)offsetof(CPUState, mmu_map[CPU_MEM_INDEX].add_write))
        : "%eax", "memory", "cc");
}

#else
#if defined(__powerpc__)

/* this code assumes that the host ppc is running in big endian mode */
#ifdef TARGET_I386
#define ENDIAN_SWAP
#endif

#if 0
        /* slow lookup, first shift right, then left */
        "rlwinm 3, %1, 32 - %2, %2, 31\n"
        "addis 4, %3, %4@ha\n"
        "rlwinm 3, 3, 2, 0, 31 - 2\n"
        "addi 4, 4, %4@l\n"
        "lwzx 3, 4, 3\n"

        /* current lookup, one shift and mask operation */
        "addis 4, %3, %4@ha\n"
        "rlwinm 3, %1, 32 - (%2 - 2), (%2 - 2), 31 - 2\n"
        "addi 4, 4, %4@l\n"
        "lwzx 3, 4, 3\n"

        /* better lookup, one shift and mask operation, mmu_map in register */
        "rlwinm 3, %1, 32 - (%2 - 2), (%2 - 2), 31 - 2\n"
        "lwzx 3, %3, 3\n"
#endif

static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
{
    RES_TYPE val;

    asm volatile (
        "addis r4, %3, ha16(%4)\n"
        "rlwinm r3, %1, 32 - (%2 - 2), (%2 - 2), 31 - 2\n"
        "addi r4, r4, lo16(%4)\n"
        "lwzx r3, r4, r3\n"
#if DATA_SIZE == 1
        "lbzx %0, %1, r3\n"
#elif DATA_SIZE == 2
#ifdef ENDIAN_SWAP
        "lhbrx %0, %1, r3\n"
#else
        "lhzx %0, %1, r3\n"
#endif
#elif DATA_SIZE == 4
#ifdef ENDIAN_SWAP
        "lwbrx %0, %1, r3\n"
#else
        "lwzx %0, %1, r3\n"
#endif
#else
#error unsupported size
#endif
        : "=r" (val)
        : "r" (ptr),
          "I" (TARGET_PAGE_BITS),
          "r" (env),
          "i" (offsetof(CPUState, mmu_map[CPU_MEM_INDEX].add_read))
        : "%r3", "%r4", "memory");
    return val;
}

#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
{
    int val;

    asm volatile (
        "addis r4, %3, ha16(%4)\n"
        "rlwinm r3, %1, 32 - (%2 - 2), (%2 - 2), 31 - 2\n"
        "addi r4, r4, lo16(%4)\n"
        "lwzx r3, r4, r3\n"
#if DATA_SIZE == 1
        "lbzx r4, %1, r3\n"
        "extsb %0, r4\n"
#elif DATA_SIZE == 2
#ifdef ENDIAN_SWAP
        "lhbrx r4, %1, r3\n"
        "extsh %0, r4\n"
#else
        "lhax %0, %1, r3\n"
#endif
#else
#error unsupported size
#endif
        : "=r" (val)
        : "r" (ptr),
          "I" (TARGET_PAGE_BITS),
          "r" (env),
          "i" (offsetof(CPUState, mmu_map[CPU_MEM_INDEX].add_read))
        : "%r3", "%r4", "memory");
    return val;
}
#endif

static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE val)
{
    asm volatile (
        "addis r4, %3, ha16(%4)\n"
        "rlwinm r3, %1, 32 - (%2 - 2), (%2 - 2), 31 - 2\n"
        "addi r4, r4, lo16(%4)\n"
        "lwzx r3, r4, r3\n"
#if DATA_SIZE == 1
        "stbx %0, %1, r3\n"
#elif DATA_SIZE == 2
#ifdef ENDIAN_SWAP
        "sthbrx %0, %1, r3\n"
#else
        "sthx %0, %1, r3\n"
#endif
#elif DATA_SIZE == 4
#ifdef ENDIAN_SWAP
        "stwbrx %0, %1, r3\n"
#else
        "stwx %0, %1, r3\n"
#endif
#else
#error unsupported size
#endif
        :
        : "r" (val),
          "r" (ptr),
          "I" (TARGET_PAGE_BITS),
          "r" (env),
          "i" (offsetof(CPUState, mmu_map[CPU_MEM_INDEX].add_write))
        : "%r3", "%r4", "memory");
}

#else
#error unsupported host CPU
#endif
#endif

#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef CPU_MEM_INDEX
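
/*
 * Illustrative sketch, not part of the original patch: a plain C rendering of
 * the lookup that the inline-assembly helpers above perform.  It assumes that
 * add_read/add_write hold one 32-bit displacement per target page which,
 * added to the guest virtual address, yields the host address; cross-endian
 * byte swapping (the ENDIAN_SWAP case on ppc) is left out.  The function
 * names are placeholders only, and the block is disabled so that it has no
 * effect on compilation.
 */
#if 0
static inline uint32_t mmu_map_ldl_sketch(CPUState *env, void *ptr)
{
    /* page number of the guest virtual address */
    unsigned long page = (unsigned long)ptr >> TARGET_PAGE_BITS;
    /* per-page displacement taken from the read map */
    uint32_t disp = env->mmu_map[CPU_MEM_INDEX].add_read[page];

    /* host address = guest address + displacement */
    return *(uint32_t *)((char *)ptr + disp);
}

static inline void mmu_map_stl_sketch(CPUState *env, void *ptr, uint32_t val)
{
    unsigned long page = (unsigned long)ptr >> TARGET_PAGE_BITS;
    /* stores use the separate write map */
    uint32_t disp = env->mmu_map[CPU_MEM_INDEX].add_write[page];

    *(uint32_t *)((char *)ptr + disp) = val;
}
#endif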