author     Peter Maydell <peter.maydell@linaro.org>    2014-06-05 21:06:13 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2014-06-05 21:06:14 +0100
commit     31e25e3e5701607a2a88b5b6c5fb1057b20941fd
tree       757e835a8cef83a5e28f5e99704854836ca5df62    /include/qom/cpu.h
parent     9d48d3f01cf3f67d54cd7e2c7834e97a57cea0b8
parent     16b96f82cdfcb185560c2f8ebfc731711e2ccb2d
Merge remote-tracking branch 'remotes/bonzini/softmmu-smap' into staging
* remotes/bonzini/softmmu-smap: (33 commits)
target-i386: cleanup x86_cpu_get_phys_page_debug
target-i386: fix protection bits in the TLB for SMEP
target-i386: support long addresses for 4MB pages (PSE-36)
target-i386: raise page fault for reserved bits in large pages
target-i386: unify reserved bits and NX bit check
target-i386: simplify pte/vaddr calculation
target-i386: raise page fault for reserved physical address bits
target-i386: test reserved PS bit on PML4Es
target-i386: set correct error code for reserved bit access
target-i386: introduce support for 1 GB pages
target-i386: introduce do_check_protect label
target-i386: tweak handling of PG_NX_MASK
target-i386: commonize checks for PAE and non-PAE
target-i386: commonize checks for 4MB and 4KB pages
target-i386: commonize checks for 2MB and 4KB pages
target-i386: fix coding standards in x86_cpu_handle_mmu_fault
target-i386: simplify SMAP handling in MMU_KSMAP_IDX
target-i386: fix kernel accesses with SMAP and CPL = 3
target-i386: move check_io helpers to seg_helper.c
target-i386: rename KSMAP to KNOSMAP
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include/qom/cpu.h')
-rw-r--r--   include/qom/cpu.h   15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index df977c88f0..4b352a28fa 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -80,6 +80,8 @@ struct TranslationBlock;
  * @has_work: Callback for checking if there is work to do.
  * @do_interrupt: Callback for interrupt handling.
  * @do_unassigned_access: Callback for unassigned access handling.
+ * @do_unaligned_access: Callback for unaligned access handling, if
+ * the target defines #ALIGNED_ONLY.
  * @memory_rw_debug: Callback for GDB memory access.
  * @dump_state: Callback for dumping state.
  * @dump_statistics: Callback for dumping statistics.
@@ -112,6 +114,8 @@ typedef struct CPUClass {
     bool (*has_work)(CPUState *cpu);
     void (*do_interrupt)(CPUState *cpu);
     CPUUnassignedAccess do_unassigned_access;
+    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+                                int is_write, int is_user, uintptr_t retaddr);
     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                            uint8_t *buf, int len, bool is_write);
     void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
@@ -544,8 +548,7 @@ void cpu_interrupt(CPUState *cpu, int mask);
 
 #endif /* USER_ONLY */
 
-#ifndef CONFIG_USER_ONLY
-
+#ifdef CONFIG_SOFTMMU
 static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                          bool is_write, bool is_exec,
                                          int opaque, unsigned size)
@@ -557,6 +560,14 @@ static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
     }
 }
 
+static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
+                                        int is_write, int is_user,
+                                        uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
+}
 #endif
 
 /**
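For context, a target consumes the new hook the same way as the existing do_interrupt and do_unassigned_access callbacks: it implements the function and installs it on its CPUClass in class_init. The sketch below shows that registration pattern against the interface added above; "foo"/FooCPU, FOO_CPU(), env.badaddr, foo_raise_exception() and FOO_EXCP_UNALIGNED are hypothetical placeholders for a target's own types and exception plumbing, not anything defined by this patch.

/* Sketch only: a hypothetical target "foo" wiring up do_unaligned_access.
 * Everything prefixed foo_/FOO_ is made up; CPUClass, CPUState, vaddr and
 * CPU_CLASS() come from qom/cpu.h. */

static void foo_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    FooCPU *cpu = FOO_CPU(cs);

    /* Record the faulting address for the guest's trap handler, then
     * deliver the target's alignment exception.  retaddr points back into
     * the generated code so the target can recover precise guest state
     * before raising the fault. */
    cpu->env.badaddr = addr;                                 /* hypothetical field */
    foo_raise_exception(&cpu->env, FOO_EXCP_UNALIGNED, retaddr);  /* hypothetical helper */
}

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* The hook is only called for targets built with ALIGNED_ONLY. */
    cc->do_unaligned_access = foo_cpu_do_unaligned_access;
}

The inline cpu_unaligned_access() wrapper added by the diff simply forwards to this per-target hook; it sits inside the new CONFIG_SOFTMMU section because the callback is only consulted from system-emulation code.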