Diffstat (limited to 'include')
-rw-r--r--  include/exec/exec-all.h          | 23
-rw-r--r--  include/exec/memory.h            | 13
-rw-r--r--  include/hw/vfio/vfio-common.h    | 23
-rw-r--r--  include/hw/vfio/vfio-platform.h  |  4
-rw-r--r--  include/qemu/timer.h             | 20
-rw-r--r--  include/qom/cpu.h                | 16
6 files changed, 59 insertions, 40 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index a3719b7f0d..a63fd6015e 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -62,24 +62,15 @@ typedef struct TranslationBlock TranslationBlock;
 #define OPC_BUF_SIZE 640
 #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
 
-/* Maximum size a TCG op can expand to.  This is complicated because a
-   single op may require several host instructions and register reloads.
-   For now take a wild guess at 192 bytes, which should allow at least
-   a couple of fixup instructions per argument.  */
-#define TCG_MAX_OP_SIZE 192
-
 #define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
 
 #include "qemu/log.h"
 
 void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
-void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
 void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
-                          int pc_pos);
+                          target_ulong *data);
 
 void cpu_gen_init(void);
-int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
-                 int *gen_code_size_ptr);
 bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
 
 void page_size_init(void);
@@ -170,13 +161,14 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
 #define CODE_GEN_PHYS_HASH_BITS     15
 #define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
 
-/* estimated block size for TB allocation */
-/* XXX: use a per code average code fragment size and modulate it
-   according to the host CPU */
+/* Estimated block size for TB allocation.  */
+/* ??? The following is based on a 2015 survey of x86_64 host output.
+   Better would seem to be some sort of dynamically sized TB array,
+   adapting to the block sizes actually being produced.  */
 #if defined(CONFIG_SOFTMMU)
-#define CODE_GEN_AVG_BLOCK_SIZE 128
+#define CODE_GEN_AVG_BLOCK_SIZE 400
 #else
-#define CODE_GEN_AVG_BLOCK_SIZE 64
+#define CODE_GEN_AVG_BLOCK_SIZE 150
 #endif
 
 #if defined(__arm__) || defined(_ARCH_PPC) \
@@ -201,6 +193,7 @@ struct TranslationBlock {
 #define CF_USE_ICOUNT  0x20000
 
     void *tc_ptr;    /* pointer to the translated code */
+    uint8_t *tc_search;  /* pointer to search data */
     /* next matching tb for physical address. */
     struct TranslationBlock *phys_hash_next;
     /* original tb when cflags has CF_NOCACHE */
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 5baaf48234..0f07159bb4 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -583,6 +583,19 @@ void memory_region_notify_iommu(MemoryRegion *mr,
 void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
 
 /**
+ * memory_region_iommu_replay: replay existing IOMMU translations to
+ * a notifier
+ *
+ * @mr: the memory region to observe
+ * @n: the notifier to which to replay iommu mappings
+ * @granularity: Minimum page granularity to replay notifications for
+ * @is_write: Whether to treat the replay as a translate "write"
+ * through the iommu
+ */
+void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
+                                hwaddr granularity, bool is_write);
+
+/**
  * memory_region_unregister_iommu_notifier: unregister a notifier for
  * changes to IOMMU translation entries.
  *
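
The memory_region_iommu_replay() prototype added above pairs with the existing
memory_region_register_iommu_notifier(): a consumer can register for future
IOMMU translation updates and then replay the translations that already exist
so none are missed. As a rough sketch only (not part of this change; the
callback and helper names are hypothetical), a VFIO-style user might do:

    #include <unistd.h>
    #include "qemu/notify.h"
    #include "exec/memory.h"

    /* Hypothetical notifier callback: each replayed or future translation
     * arrives as an IOMMUTLBEntry describing one IOVA range. */
    static void giommu_map_notify(Notifier *n, void *data)
    {
        IOMMUTLBEntry *iotlb = data;

        if (iotlb->perm != IOMMU_NONE) {
            /* mapping added: map iotlb->iova .. iotlb->iova + iotlb->addr_mask
             * to iotlb->translated_addr in the host IOMMU here */
        } else {
            /* range invalidated: unmap it here */
        }
    }

    /* Hypothetical helper: attach the notifier, then walk the region at host
     * page granularity so pre-existing translations are reported as well. */
    static void attach_and_replay(MemoryRegion *iommu_mr, Notifier *n)
    {
        n->notify = giommu_map_notify;
        memory_region_register_iommu_notifier(iommu_mr, n);
        memory_region_iommu_replay(iommu_mr, n, getpagesize(), false);
    }
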
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 9b9901fbe2..f037f3c425 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -59,22 +59,19 @@ typedef struct VFIOAddressSpace {
 
 struct VFIOGroup;
 
-typedef struct VFIOType1 {
-    MemoryListener listener;
-    int error;
-    bool initialized;
-} VFIOType1;
-
 typedef struct VFIOContainer {
     VFIOAddressSpace *space;
     int fd; /* /dev/vfio/vfio, empowered by the attached groups */
-    struct {
-        /* enable abstraction to support various iommu backends */
-        union {
-            VFIOType1 type1;
-        };
-        void (*release)(struct VFIOContainer *);
-    } iommu_data;
+    MemoryListener listener;
+    int error;
+    bool initialized;
+    /*
+     * This assumes the host IOMMU can support only a single
+     * contiguous IOVA window.  We may need to generalize that in
+     * future
+     */
+    hwaddr min_iova, max_iova;
+    uint64_t iova_pgsizes;
     QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
     QLIST_HEAD(, VFIOGroup) group_list;
     QLIST_ENTRY(VFIOContainer) next;
diff --git a/include/hw/vfio/vfio-platform.h b/include/hw/vfio/vfio-platform.h
index c5cf1d79f3..b468f80b1e 100644
--- a/include/hw/vfio/vfio-platform.h
+++ b/include/hw/vfio/vfio-platform.h
@@ -34,8 +34,8 @@ enum {
 typedef struct VFIOINTp {
     QLIST_ENTRY(VFIOINTp) next; /* entry for IRQ list */
     QSIMPLEQ_ENTRY(VFIOINTp) pqnext; /* entry for pending IRQ queue */
-    EventNotifier interrupt; /* eventfd triggered on interrupt */
-    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
+    EventNotifier *interrupt; /* eventfd triggered on interrupt */
+    EventNotifier *unmask; /* eventfd for unmask on QEMU bypass */
     qemu_irq qemuirq;
     struct VFIOPlatformDevice *vdev; /* back pointer to device */
     int state; /* inactive, pending, active */
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 99392464a6..d0946cb953 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -857,7 +857,7 @@ int64_t cpu_icount_to_ns(int64_t icount);
 
 #if defined(_ARCH_PPC)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t retval;
 #ifdef _ARCH_PPC64
@@ -883,7 +883,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__i386__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t val;
     asm volatile ("rdtsc" : "=A" (val));
@@ -892,7 +892,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__x86_64__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     uint32_t low,high;
     int64_t val;
@@ -905,7 +905,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__hppa__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int val;
     asm volatile ("mfctl %%cr16, %0" : "=r"(val));
@@ -914,7 +914,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__ia64)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t val;
     asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
@@ -923,7 +923,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__s390__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     int64_t val;
     asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
@@ -932,7 +932,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__sparc__)
 
-static inline int64_t cpu_get_real_ticks (void)
+static inline int64_t cpu_get_host_ticks (void)
 {
 #if defined(_LP64)
     uint64_t rval;
@@ -970,7 +970,7 @@ static inline int64_t cpu_get_real_ticks (void)
                       : "=r" (value));  \
     }
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
     uint32_t count;
@@ -986,7 +986,7 @@ static inline int64_t cpu_get_real_ticks(void)
 
 #elif defined(__alpha__)
 
-static inline int64_t cpu_get_real_ticks(void)
+static inline int64_t cpu_get_host_ticks(void)
 {
     uint64_t cc;
     uint32_t cur, ofs;
@@ -1001,7 +1001,7 @@ static inline int64_t cpu_get_real_ticks(void)
 /* The host CPU doesn't have an easily accessible cycle counter.
    Just return a monotonically increasing value.  This will be
    totally wrong, but hopefully better than nothing.  */
-static inline int64_t cpu_get_real_ticks (void)
+static inline int64_t cpu_get_host_ticks (void)
 {
     static int64_t ticks = 0;
     return ticks++;
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 9405554a2b..b613ff0329 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -721,6 +721,7 @@ void cpu_single_step(CPUState *cpu, int enabled);
 /* 0x08 currently unused */
 #define BP_GDB                0x10
 #define BP_CPU                0x20
+#define BP_ANY                (BP_GDB | BP_CPU)
 #define BP_WATCHPOINT_HIT_READ 0x40
 #define BP_WATCHPOINT_HIT_WRITE 0x80
 #define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
@@ -731,6 +732,21 @@ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
 void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
 
+/* Return true if PC matches an installed breakpoint. */
+static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
+{
+    CPUBreakpoint *bp;
+
+    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
+        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+            if (bp->pc == pc && (bp->flags & mask)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                           int flags, CPUWatchpoint **watchpoint);
 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
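
The BP_ANY mask and the inline cpu_breakpoint_test() helper added above make
the common "is there any breakpoint at this PC?" check cheap enough to call
from a hot path such as a translator loop. A minimal illustrative caller (the
function name is made up for this sketch, not part of the patch) could look
like:

    #include "qom/cpu.h"

    /* Hypothetical check a target translator might perform before emitting
     * code for the instruction at 'pc': stop and raise a debug exception if
     * any gdbstub- or CPU-installed breakpoint matches. */
    static bool check_breakpoint(CPUState *cs, vaddr pc)
    {
        if (cpu_breakpoint_test(cs, pc, BP_ANY)) {
            /* target-specific: generate the debug exception here */
            return true;
        }
        return false;
    }
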