author     Peter Maydell <peter.maydell@linaro.org>    2014-06-05 21:06:13 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2014-06-05 21:06:14 +0100
commit     31e25e3e5701607a2a88b5b6c5fb1057b20941fd (patch)
tree       757e835a8cef83a5e28f5e99704854836ca5df62 /include/exec/softmmu_exec.h
parent     9d48d3f01cf3f67d54cd7e2c7834e97a57cea0b8 (diff)
parent     16b96f82cdfcb185560c2f8ebfc731711e2ccb2d (diff)
Merge remote-tracking branch 'remotes/bonzini/softmmu-smap' into staging

* remotes/bonzini/softmmu-smap: (33 commits)
  target-i386: cleanup x86_cpu_get_phys_page_debug
  target-i386: fix protection bits in the TLB for SMEP
  target-i386: support long addresses for 4MB pages (PSE-36)
  target-i386: raise page fault for reserved bits in large pages
  target-i386: unify reserved bits and NX bit check
  target-i386: simplify pte/vaddr calculation
  target-i386: raise page fault for reserved physical address bits
  target-i386: test reserved PS bit on PML4Es
  target-i386: set correct error code for reserved bit access
  target-i386: introduce support for 1 GB pages
  target-i386: introduce do_check_protect label
  target-i386: tweak handling of PG_NX_MASK
  target-i386: commonize checks for PAE and non-PAE
  target-i386: commonize checks for 4MB and 4KB pages
  target-i386: commonize checks for 2MB and 4KB pages
  target-i386: fix coding standards in x86_cpu_handle_mmu_fault
  target-i386: simplify SMAP handling in MMU_KSMAP_IDX
  target-i386: fix kernel accesses with SMAP and CPL = 3
  target-i386: move check_io helpers to seg_helper.c
  target-i386: rename KSMAP to KNOSMAP
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include/exec/softmmu_exec.h')
-rw-r--r--  include/exec/softmmu_exec.h  216
1 file changed, 0 insertions, 216 deletions
diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h
deleted file mode 100644
index 470db20174..0000000000
--- a/include/exec/softmmu_exec.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Software MMU support
- *
- * Generate inline load/store functions for all MMU modes (typically
- * at least _user and _kernel) as well as _data versions, for all data
- * sizes.
- *
- * Used by target op helpers.
- *
- * MMU mode suffixes are defined in target cpu.h.
- */
-
-/* XXX: find something cleaner.
- * Furthermore, this is false for 64-bit targets
- */
-#define ldul_user ldl_user
-#define ldul_kernel ldl_kernel
-#define ldul_hypv ldl_hypv
-#define ldul_executive ldl_executive
-#define ldul_supervisor ldl_supervisor
-
-/* The memory helpers for tcg-generated code need tcg_target_long etc. */
-#include "tcg.h"
-
-#define ACCESS_TYPE 0
-#define MEMSUFFIX MMU_MODE0_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#define ACCESS_TYPE 1
-#define MEMSUFFIX MMU_MODE1_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#if (NB_MMU_MODES >= 3)
-
-#define ACCESS_TYPE 2
-#define MEMSUFFIX MMU_MODE2_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 3) */
-
-#if (NB_MMU_MODES >= 4)
-
-#define ACCESS_TYPE 3
-#define MEMSUFFIX MMU_MODE3_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 4) */
-
-#if (NB_MMU_MODES >= 5)
-
-#define ACCESS_TYPE 4
-#define MEMSUFFIX MMU_MODE4_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 5) */
-
-#if (NB_MMU_MODES >= 6)
-
-#define ACCESS_TYPE 5
-#define MEMSUFFIX MMU_MODE5_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 6) */
-
-#if (NB_MMU_MODES > 6)
-#error "NB_MMU_MODES > 6 is not supported for now"
-#endif /* (NB_MMU_MODES > 6) */
-
-/* these accesses are slower; they must be as rare as possible */
-#define ACCESS_TYPE (NB_MMU_MODES)
-#define MEMSUFFIX _data
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#define ldub(p) ldub_data(p)
-#define ldsb(p) ldsb_data(p)
-#define lduw(p) lduw_data(p)
-#define ldsw(p) ldsw_data(p)
-#define ldl(p) ldl_data(p)
-#define ldq(p) ldq_data(p)
-
-#define stb(p, v) stb_data(p, v)
-#define stw(p, v) stw_data(p, v)
-#define stl(p, v) stl_data(p, v)
-#define stq(p, v) stq_data(p, v)
-
-/**
- * tlb_vaddr_to_host:
- * @env: CPUArchState
- * @addr: guest virtual address to look up
- * @access_type: 0 for read, 1 for write, 2 for execute
- * @mmu_idx: MMU index to use for lookup
- *
- * Look up the specified guest virtual address in the TCG softmmu TLB.
- * If the TLB contains a host virtual address suitable for direct RAM
- * access, then return it. Otherwise (TLB miss, TLB entry is for an
- * I/O access, etc) return NULL.
- *
- * This is the equivalent of the initial fast-path code used by
- * TCG backends for guest load and store accesses.
- */
-static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
- int access_type, int mmu_idx)
-{
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
- target_ulong tlb_addr;
- uintptr_t haddr;
-
- switch (access_type) {
- case 0:
- tlb_addr = tlbentry->addr_read;
- break;
- case 1:
- tlb_addr = tlbentry->addr_write;
- break;
- case 2:
- tlb_addr = tlbentry->addr_code;
- break;
- default:
- g_assert_not_reached();
- }
-
- if ((addr & TARGET_PAGE_MASK)
- != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- /* TLB entry is for a different page */
- return NULL;
- }
-
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- return NULL;
- }
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
- return (void *)haddr;
-}
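
The header deleted above generates one family of accessors per MMU mode by re-including "exec/softmmu_header.h" with different DATA_SIZE, MEMSUFFIX and ACCESS_TYPE settings; the mode suffixes themselves (MMU_MODE0_SUFFIX and friends) come from the target's cpu.h. Below is a minimal, self-contained sketch of that multiple-inclusion pattern; the file name accessor_template.h, the GLUE3 macro and the memcpy body are invented stand-ins, not the real softmmu machinery.

/*
 * File: accessor_template.h (hypothetical; the real template is
 * exec/softmmu_header.h and performs a guest TLB lookup, not a memcpy).
 *
 * The includer defines DATA_SIZE and MEMSUFFIX, then includes this header.
 * Each inclusion stamps out one load function and clears the parameters so
 * the header can be included again with different values.
 */
#if DATA_SIZE == 1
#define DATA_TYPE uint8_t
#define SUFFIX b
#elif DATA_SIZE == 2
#define DATA_TYPE uint16_t
#define SUFFIX w
#elif DATA_SIZE == 4
#define DATA_TYPE uint32_t
#define SUFFIX l
#else
#error unsupported DATA_SIZE
#endif

#define GLUE3_(a, b, c) a##b##c
#define GLUE3(a, b, c)  GLUE3_(a, b, c)

/* Expands to e.g. ldl_user(), ldw_kernel(), ... */
static inline DATA_TYPE GLUE3(ld, SUFFIX, MEMSUFFIX)(const void *p)
{
    DATA_TYPE v;
    memcpy(&v, p, DATA_SIZE);   /* stand-in for the real per-mode access */
    return v;
}

#undef DATA_TYPE
#undef SUFFIX
#undef GLUE3_
#undef GLUE3
#undef DATA_SIZE
#undef MEMSUFFIX

/* File: accessors.c -- includes the template once per (size, mode) pair. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DATA_SIZE 4
#define MEMSUFFIX _user
#include "accessor_template.h"

#define DATA_SIZE 4
#define MEMSUFFIX _kernel
#include "accessor_template.h"

int main(void)
{
    uint32_t word = 0x12345678;
    /* Both functions exist side by side, one per MMU-mode suffix. */
    printf("%x %x\n", ldl_user(&word), ldl_kernel(&word));
    return 0;
}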
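
The page-match test in tlb_vaddr_to_host() above folds TLB_INVALID_MASK into the comparison mask, so an entry that has been invalidated can never compare equal to any page-aligned guest address and the fast path needs no separate validity check. A standalone sketch of just that comparison, with assumed constants (4 KiB pages, an arbitrary bit chosen for TLB_INVALID_MASK) rather than QEMU's real definitions:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                          /* assumed 4 KiB pages */
#define TARGET_PAGE_MASK (~((uint64_t)(1 << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK (1 << 3)                    /* assumed flag bit    */

/* Same shape as the check in tlb_vaddr_to_host(): hit only if the entry
 * maps the same page and its TLB_INVALID_MASK bit is clear. */
static int tlb_hit(uint64_t addr, uint64_t tlb_addr)
{
    return (addr & TARGET_PAGE_MASK)
        == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

int main(void)
{
    uint64_t addr = 0x40001234;
    uint64_t entry_same_page   = 0x40001000;
    uint64_t entry_invalidated = 0x40001000 | TLB_INVALID_MASK;
    uint64_t entry_other_page  = 0x7fff0000;

    printf("valid, same page:  %d\n", tlb_hit(addr, entry_same_page));   /* 1 */
    printf("invalidated entry: %d\n", tlb_hit(addr, entry_invalidated)); /* 0 */
    printf("different page:    %d\n", tlb_hit(addr, entry_other_page));  /* 0 */
    return 0;
}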
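
tlb_vaddr_to_host() is described above as the equivalent of the TCG fast path: the caller either gets a host pointer suitable for direct RAM access or NULL, and in the NULL case it must go through the normal, slower per-access route (which can handle I/O, faults and so on). A standalone sketch of that calling pattern, using invented stand-ins (guest_addr_to_host(), slow_load_byte(), copy_block()) instead of QEMU's CPUArchState and helper APIs; a real helper would also have to stop the fast path at a page boundary, which the sketch ignores:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t guest_ram[4096];

/* Stub lookup: a host pointer for "RAM-like" addresses, NULL for anything
 * that would be a TLB miss or an I/O mapping in the real implementation. */
static void *guest_addr_to_host(uint64_t addr)
{
    return addr < sizeof(guest_ram) ? &guest_ram[addr] : NULL;
}

/* Stub slow path: one byte at a time. */
static uint8_t slow_load_byte(uint64_t addr)
{
    return addr < sizeof(guest_ram) ? guest_ram[addr] : 0;
}

static void copy_block(uint8_t *dst, uint64_t src, size_t len)
{
    void *host = guest_addr_to_host(src);

    if (host != NULL) {
        memcpy(dst, host, len);                    /* fast path: direct RAM  */
    } else {
        for (size_t i = 0; i < len; i++) {
            dst[i] = slow_load_byte(src + i);      /* slow path, per element */
        }
    }
}

int main(void)
{
    uint8_t buf[8];

    memcpy(guest_ram + 16, "softmmu", 8);
    copy_block(buf, 16, sizeof(buf));
    printf("%s\n", buf);
    return 0;
}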