Diffstat (limited to 'include')
-rw-r--r--  include/exec/exec-all.h          |  19
-rw-r--r--  include/exec/softmmu_defs.h      |  46
-rw-r--r--  include/exec/softmmu_template.h  | 309
-rw-r--r--  include/hw/loader.h              |   1
4 files changed, 155 insertions, 220 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 5920f73c90..ffb69a4c70 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -326,18 +326,7 @@ extern uintptr_t tci_tb_ptr;
(6) jump to corresponding code of the next of fast path
*/
# if defined(__i386__) || defined(__x86_64__)
-/* To avoid broken disassembling, long jmp is used for embedding fast path pc,
- so that the destination is the next code of fast path, though this jmp is
- never executed.
-
- call MMU helper
- jmp POST_PROC (2byte) <- GETRA()
- jmp NEXT_CODE (5byte)
- POST_PROCESS ... <- GETRA() + 7
- */
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
- *(int32_t *)((void *)GETRA() + 3) - 1))
+# define GETPC_EXT() GETPC()
# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
@@ -358,7 +347,7 @@ static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
not the start of the next opcode */
return ra;
}
-#elif defined(__aarch64__)
+# elif defined(__aarch64__)
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
@@ -376,7 +365,9 @@ static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
bool is_tcg_gen_code(uintptr_t pc_ptr);
-# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
+# ifndef GETPC_EXT
+# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
+# endif
#else
# define GETPC_EXT() GETPC()
#endif
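
With these hunks applied, GETPC_EXT() resolves per host roughly as sketched below: on i386/x86_64 the call-site decoding is gone, because the new ret-variant helpers receive the return address as an explicit argument, while other hosts under CONFIG_QEMU_LDST_OPTIMIZATION keep the is_tcg_gen_code() dispatch. This is a condensed illustration of the header's post-patch shape, not a verbatim excerpt.

    /* Condensed sketch of the post-patch GETPC_EXT() selection. */
    #if defined(__i386__) || defined(__x86_64__)
    /* retaddr is now passed explicitly, so no call-site decoding needed. */
    # define GETPC_EXT() GETPC()
    #else
    # define GETRA() ((uintptr_t)__builtin_return_address(0))
    # ifndef GETPC_EXT
    #  define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
    # endif
    #endif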
diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h
index 1f25e33ce4..e55e7178c6 100644
--- a/include/exec/softmmu_defs.h
+++ b/include/exec/softmmu_defs.h
@@ -9,29 +9,41 @@
#ifndef SOFTMMU_DEFS_H
#define SOFTMMU_DEFS_H
+uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx, uintptr_t retaddr);
+
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx);
+
+void helper_stb_mmu(CPUArchState *env, target_ulong addr,
+ uint8_t val, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr,
+ uint16_t val, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t val, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t val, int mmu_idx);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
-int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
- int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx);
-#endif
+
+#endif /* SOFTMMU_DEFS_H */
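
The only difference between the two families of declarations above is the explicit retaddr parameter. A hypothetical caller (sketch only; load_u32_with_ra is an invented name, while helper_ret_ldl_mmu and GETPC() come from this patch) shows how the old-style behaviour can be expressed in terms of the new entry point:

    /* Hypothetical wrapper, not part of the patch: the ret-variant simply
     * threads the caller's return address through instead of having the
     * helper reconstruct it from the call site. */
    static uint32_t load_u32_with_ra(CPUArchState *env, target_ulong addr,
                                     int mmu_idx)
    {
        /* GETPC() captures this call site so a TLB fault can be attributed
         * to the right translated instruction. */
        return helper_ret_ldl_mmu(env, addr, mmu_idx, GETPC());
    }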
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index 8584902cbe..eaca9e1035 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -54,10 +54,6 @@
#define ADDR_READ addr_read
#endif
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- int mmu_idx,
- uintptr_t retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
target_ulong addr,
@@ -78,123 +74,86 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
}
/* handle all cases except unaligned access which span two pages */
+#ifdef SOFTMMU_CODE_ACCESS
+static
+#endif
DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- int mmu_idx)
+glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, int mmu_idx,
+ uintptr_t retaddr)
{
- DATA_TYPE res;
- int index;
- target_ulong tlb_addr;
- hwaddr ioaddr;
- uintptr_t retaddr;
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ uintptr_t haddr;
- /* test if there is match for unaligned or IO access */
- /* XXX: could done more in memory macro in a non portable way */
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- retaddr = GETPC_EXT();
- ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- /* slow unaligned access (it spans two pages or IO) */
- do_unaligned_access:
- retaddr = GETPC_EXT();
+ /* If the TLB entry is for a different page, reload and try again. */
+ if ((addr & TARGET_PAGE_MASK)
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
-#endif
- res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
- mmu_idx, retaddr);
- } else {
- /* unaligned/aligned access in the same page */
- uintptr_t addend;
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- retaddr = GETPC_EXT();
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- }
-#endif
- addend = env->tlb_table[mmu_idx][index].addend;
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend));
}
- } else {
- /* the page is not in the TLB : fill it */
- retaddr = GETPC_EXT();
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- goto redo;
+ tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
- return res;
-}
-/* handle all unaligned cases */
-static DATA_TYPE
-glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- int mmu_idx,
- uintptr_t retaddr)
-{
- DATA_TYPE res, res1, res2;
- int index, shift;
- hwaddr ioaddr;
- target_ulong tlb_addr, addr1, addr2;
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ hwaddr ioaddr;
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+ ioaddr = env->iotlb[mmu_idx][index];
+ return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ }
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- do_unaligned_access:
- /* slow unaligned access (it spans two pages) */
- addr1 = addr & ~(DATA_SIZE - 1);
- addr2 = addr1 + DATA_SIZE;
- res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
- mmu_idx, retaddr);
- res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
- mmu_idx, retaddr);
- shift = (addr & (DATA_SIZE - 1)) * 8;
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (DATA_SIZE > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+ >= TARGET_PAGE_SIZE)) {
+ target_ulong addr1, addr2;
+ DATA_TYPE res1, res2, res;
+ unsigned shift;
+ do_unaligned_access:
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+#endif
+ addr1 = addr & ~(DATA_SIZE - 1);
+ addr2 = addr1 + DATA_SIZE;
+ res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr1,
+ mmu_idx, retaddr);
+ res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr2,
+ mmu_idx, retaddr);
+ shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
- res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
+ res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
- res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
+ res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
- res = (DATA_TYPE)res;
- } else {
- /* unaligned/aligned access in the same page */
- uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend));
- }
- } else {
- /* the page is not in the TLB : fill it */
- tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- goto redo;
+ return res;
+ }
+
+ /* Handle aligned access or unaligned access in the same page. */
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
- return res;
+#endif
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ return glue(glue(ld, USUFFIX), _raw)((uint8_t *)haddr);
}
-#ifndef SOFTMMU_CODE_ACCESS
+DATA_TYPE
+glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+ int mmu_idx)
+{
+ return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+ GETPC_EXT());
+}
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- DATA_TYPE val,
- int mmu_idx,
- uintptr_t retaddr);
+#ifndef SOFTMMU_CODE_ACCESS
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
@@ -214,107 +173,79 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
io_mem_write(mr, physaddr, val, 1 << SHIFT);
}
-void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr, DATA_TYPE val,
- int mmu_idx)
+void
+glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, DATA_TYPE val,
+ int mmu_idx, uintptr_t retaddr)
{
- hwaddr ioaddr;
- target_ulong tlb_addr;
- uintptr_t retaddr;
- int index;
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ uintptr_t haddr;
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- retaddr = GETPC_EXT();
- ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- do_unaligned_access:
- retaddr = GETPC_EXT();
+ /* If the TLB entry is for a different page, reload and try again. */
+ if ((addr & TARGET_PAGE_MASK)
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
-#endif
- glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
- mmu_idx, retaddr);
- } else {
- /* aligned/unaligned access in the same page */
- uintptr_t addend;
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- retaddr = GETPC_EXT();
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
- }
-#endif
- addend = env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend), val);
}
- } else {
- /* the page is not in the TLB : fill it */
- retaddr = GETPC_EXT();
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
tlb_fill(env, addr, 1, mmu_idx, retaddr);
- goto redo;
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
-}
-/* handles all unaligned cases */
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- DATA_TYPE val,
- int mmu_idx,
- uintptr_t retaddr)
-{
- hwaddr ioaddr;
- target_ulong tlb_addr;
- int index, i;
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ hwaddr ioaddr;
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+ ioaddr = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ return;
+ }
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- do_unaligned_access:
- /* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for(i = DATA_SIZE - 1; i >= 0; i--) {
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (DATA_SIZE > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+ >= TARGET_PAGE_SIZE)) {
+ int i;
+ do_unaligned_access:
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+ /* XXX: not efficient, but simple */
+ /* Note: relies on the fact that tlb_fill() does not remove the
+ * previous page from the TLB cache. */
+ for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
- glue(slow_stb, MMUSUFFIX)(env, addr + i,
- val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
- mmu_idx, retaddr);
+ uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
#else
- glue(slow_stb, MMUSUFFIX)(env, addr + i,
- val >> (i * 8),
- mmu_idx, retaddr);
+ uint8_t val8 = val >> (i * 8);
#endif
- }
- } else {
- /* aligned/unaligned access in the same page */
- uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend), val);
+ glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+ mmu_idx, retaddr);
}
- } else {
- /* the page is not in the TLB : fill it */
- tlb_fill(env, addr, 1, mmu_idx, retaddr);
- goto redo;
+ return;
+ }
+
+ /* Handle aligned access or unaligned access in the same page. */
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
}
+#endif
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
+}
+
+void
+glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+ DATA_TYPE val, int mmu_idx)
+{
+ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
+ GETPC_EXT());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
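
For reference, the byte-granular recombination in the new two-page load path works as in the standalone little-endian sketch below (plain C with DATA_SIZE fixed at 4; combine_unaligned_le is an invented name, not QEMU code):

    #include <stdint.h>

    /* res1 holds the bytes loaded from the first page (aligned address at
     * or below addr), res2 those from the following page. */
    static uint32_t combine_unaligned_le(uint32_t res1, uint32_t res2,
                                         uintptr_t addr)
    {
        /* shift is never 0 on this path: an aligned 4-byte access cannot
         * straddle a page boundary, so (addr & 3) != 0 here. */
        unsigned shift = (addr & 3) * 8;  /* (addr & (DATA_SIZE - 1)) * 8 */
        return (res1 >> shift) | (res2 << (32 - shift));
    }

The big-endian variant swaps the shift directions, matching the TARGET_WORDS_BIGENDIAN branch above.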
diff --git a/include/hw/loader.h b/include/hw/loader.h
index eb9c9a3612..61457360f6 100644
--- a/include/hw/loader.h
+++ b/include/hw/loader.h
@@ -36,6 +36,7 @@ void pstrcpy_targphys(const char *name,
hwaddr dest, int buf_size,
const char *source);
+extern bool rom_file_in_ram;
int rom_add_file(const char *file, const char *fw_dir,
hwaddr addr, int32_t bootindex);