author | Peter Maydell <peter.maydell@linaro.org> | 2016-09-28 23:02:56 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2016-09-28 23:02:56 +0100
commit | c640f2849ee8775fe1bbd7a2772610aa77816f9f (patch)
tree | 023903f354052da654a366a42f40fb717c28673d /include
parent | bc63afaf5f6d906ff56608b52d575ac8dbb09062 (diff)
parent | 6d0ceb80ffe18ad4b28aab7356f440636c0be7be (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* thread-safe tb_flush (Fred, Alex, Sergey, me, Richard, Emilio,... :-)
* license clarification for compiler.h (Felipe)
* glib cflags improvement (Marc-André)
* checkpatch silencing (Paolo)
* SMRAM migration fix (Paolo)
* Replay improvements (Pavel)
* IOMMU notifier improvements (Peter)
* IOAPIC now defaults to version 0x20 (Peter)
# gpg: Signature made Tue 27 Sep 2016 10:57:40 BST
# gpg: using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83
* remotes/bonzini/tags/for-upstream: (28 commits)
replay: allow replay stopping and restarting
replay: vmstate for replay module
replay: move internal data to the structure
cpus-common: lock-free fast path for cpu_exec_start/end
tcg: Make tb_flush() thread safe
cpus-common: Introduce async_safe_run_on_cpu()
cpus-common: simplify locking for start_exclusive/end_exclusive
cpus-common: remove redundant call to exclusive_idle()
cpus-common: always defer async_run_on_cpu work items
docs: include formal model for TCG exclusive sections
cpus-common: move exclusive work infrastructure from linux-user
cpus-common: fix uninitialized variable use in run_on_cpu
cpus-common: move CPU work item management to common code
cpus-common: move CPU list management to common code
linux-user: Add qemu_cpu_is_self() and qemu_cpu_kick()
linux-user: Use QemuMutex and QemuCond
cpus: Rename flush_queued_work()
cpus: Move common code out of {async_,}run_on_cpu()
cpus: pass CPUState to run_on_cpu helpers
build-sys: put glib_cflags in QEMU_CFLAGS
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
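The cpus-common commits above ("cpus: pass CPUState to run_on_cpu helpers", "cpus-common: Introduce async_safe_run_on_cpu()") change the run_on_cpu() family so that a work item now receives the CPUState it runs on (run_on_cpu_func), and add async_safe_run_on_cpu(), which runs the item outside the BQL while every other vCPU sleeps; see the include/qom/cpu.h hunks in the diff below. A minimal sketch of a caller adapting to the new signature, assuming only those declarations; the names do_global_flush, schedule_global_flush and flush_pending are illustrative, not part of the series:

```c
#include "qemu/osdep.h"
#include "qom/cpu.h"    /* CPUState, run_on_cpu_func, async_safe_run_on_cpu() */

/* Illustrative work item: with this series the callback receives the
 * CPUState it was scheduled on, not just an opaque pointer. */
static void do_global_flush(CPUState *cpu, void *data)
{
    bool *flush_pending = data;

    /* async_safe_run_on_cpu() work runs outside the BQL and while all
     * other vCPUs are sleeping, so shared state can be touched safely. */
    *flush_pending = false;
    (void)cpu;
}

static void schedule_global_flush(CPUState *cpu, bool *flush_pending)
{
    /* Previously this would have been async_run_on_cpu(cpu, func, data)
     * with a void (*func)(void *data) callback. */
    async_safe_run_on_cpu(cpu, do_global_flush, flush_pending);
}
```

The shortlog entry "tcg: Make tb_flush() thread safe" appears to rely on exactly this mechanism, deferring the flush to a safe work item rather than performing it in place.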
Diffstat (limited to 'include')
-rw-r--r-- | include/exec/cpu-common.h | 5
-rw-r--r-- | include/exec/exec-all.h | 11
-rw-r--r-- | include/exec/memory.h | 63
-rw-r--r-- | include/exec/tb-context.h | 2
-rw-r--r-- | include/hw/compat.h | 4
-rw-r--r-- | include/hw/vfio/vfio-common.h | 2
-rw-r--r-- | include/qemu/compiler.h | 6
-rw-r--r-- | include/qom/cpu.h | 102
-rw-r--r-- | include/sysemu/replay.h | 4
9 files changed, 157 insertions, 42 deletions
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 952bcfeb4c..869ba41b0c 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -23,6 +23,11 @@ typedef struct CPUListState {
     FILE *file;
 } CPUListState;
 
+/* The CPU list lock nests outside tb_lock/tb_unlock. */
+void qemu_init_cpu_list(void);
+void cpu_list_lock(void);
+void cpu_list_unlock(void);
+
 #if !defined(CONFIG_USER_ONLY)
 
 enum device_endian {
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 008e09a3c1..336a57cde6 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -56,17 +56,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                               target_ulong pc, target_ulong cs_base,
                               uint32_t flags,
                               int cflags);
-#if defined(CONFIG_USER_ONLY)
-void cpu_list_lock(void);
-void cpu_list_unlock(void);
-#else
-static inline void cpu_list_unlock(void)
-{
-}
-static inline void cpu_list_lock(void)
-{
-}
-#endif
 
 void cpu_exec_init(CPUState *cpu, Error **errp);
 void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 3e4d4164cd..10d7eacc40 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -67,6 +67,27 @@ struct IOMMUTLBEntry {
     IOMMUAccessFlags perm;
 };
 
+/*
+ * Bitmap for different IOMMUNotifier capabilities. Each notifier can
+ * register with one or multiple IOMMU Notifier capability bit(s).
+ */
+typedef enum {
+    IOMMU_NOTIFIER_NONE = 0,
+    /* Notify cache invalidations */
+    IOMMU_NOTIFIER_UNMAP = 0x1,
+    /* Notify entry changes (newly created entries) */
+    IOMMU_NOTIFIER_MAP = 0x2,
+} IOMMUNotifierFlag;
+
+#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+
+struct IOMMUNotifier {
+    void (*notify)(struct IOMMUNotifier *notifier, IOMMUTLBEntry *data);
+    IOMMUNotifierFlag notifier_flags;
+    QLIST_ENTRY(IOMMUNotifier) node;
+};
+typedef struct IOMMUNotifier IOMMUNotifier;
+
 /* New-style MMIO accessors can indicate that the transaction failed.
  * A zero (MEMTX_OK) response means success; anything else is a failure
  * of some kind. The memory subsystem will bitwise-OR together results
@@ -153,10 +174,10 @@ struct MemoryRegionIOMMUOps {
     IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
     /* Returns minimum supported page size */
     uint64_t (*get_min_page_size)(MemoryRegion *iommu);
-    /* Called when the first notifier is set */
-    void (*notify_started)(MemoryRegion *iommu);
-    /* Called when the last notifier is removed */
-    void (*notify_stopped)(MemoryRegion *iommu);
+    /* Called when IOMMU Notifier flag changed */
+    void (*notify_flag_changed)(MemoryRegion *iommu,
+                                IOMMUNotifierFlag old_flags,
+                                IOMMUNotifierFlag new_flags);
 };
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
@@ -201,7 +222,8 @@ struct MemoryRegion {
     const char *name;
     unsigned ioeventfd_nb;
     MemoryRegionIoeventfd *ioeventfds;
-    NotifierList iommu_notify;
+    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
+    IOMMUNotifierFlag iommu_notify_flags;
 };
 
 /**
@@ -607,6 +629,15 @@ uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);
 /**
  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
  *
+ * The notification type will be decided by entry.perm bits:
+ *
+ * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
+ * - For MAP (newly added entry) notifies: set entry.perm to the
+ *   permission of the page (which is definitely !IOMMU_NONE).
+ *
+ * Note: for any IOMMU implementation, an in-place mapping change
+ * should be notified with an UNMAP followed by a MAP.
+ *
  * @mr: the memory region that was changed
  * @entry: the new entry in the IOMMU translation table. The entry
  *         replaces all old entries for the same virtual I/O address range.
@@ -620,11 +651,12 @@ void memory_region_notify_iommu(MemoryRegion *mr,
  * IOMMU translation entries.
  *
  * @mr: the memory region to observe
- * @n: the notifier to be added; the notifier receives a pointer to an
- *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
- *     valid on exit from the notifier.
+ * @n: the IOMMUNotifier to be added; the notify callback receives a
+ *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
+ *     ceases to be valid on exit from the notifier.
  */
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
+void memory_region_register_iommu_notifier(MemoryRegion *mr,
+                                           IOMMUNotifier *n);
 
 /**
  * memory_region_iommu_replay: replay existing IOMMU translations to
@@ -636,7 +668,8 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
  * @is_write: Whether to treat the replay as a translate "write"
  *            through the iommu
  */
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
+                                bool is_write);
 
 /**
  * memory_region_unregister_iommu_notifier: unregister a notifier for
@@ -646,7 +679,8 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
  *      needs to be called
  * @n: the notifier to be removed.
  */
-void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n);
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+                                             IOMMUNotifier *n);
 
 /**
  * memory_region_name: get a memory region's name
@@ -1154,12 +1188,11 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                        hwaddr addr, uint64_t size);
 
 /**
- * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
+ * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
- * Synchronizes the dirty page log for an entire address space.
- * @as: the address space that contains the memory being synchronized
+ * Synchronizes the dirty page log for all address spaces.
 */
-void address_space_sync_dirty_bitmap(AddressSpace *as);
+void memory_global_dirty_log_sync(void);
 
 /**
  * memory_region_transaction_begin: Start a transaction.
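The include/exec/memory.h changes above replace the generic Notifier with a dedicated IOMMUNotifier that carries a MAP/UNMAP capability mask. A sketch of how a consumer might register one under the new API, based only on the declarations in the hunks above; the callback and function names are hypothetical, and the fields are assigned directly since this tree provides no init helper for IOMMUNotifier:

```c
#include "qemu/osdep.h"
#include "exec/memory.h"    /* IOMMUNotifier, IOMMUTLBEntry, IOMMU_NOTIFIER_* */

/* Hypothetical callback: entry->perm == IOMMU_NONE signals an UNMAP
 * (invalidation); any other permission announces a newly created MAP. */
static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    if (entry->perm == IOMMU_NONE) {
        /* ... invalidate any cached translation for entry->iova ... */
    } else {
        /* ... record the newly created mapping ... */
    }
}

static IOMMUNotifier my_notifier;

static void my_attach_notifier(MemoryRegion *iommu_mr)
{
    my_notifier.notify = my_iommu_notify;
    my_notifier.notifier_flags = IOMMU_NOTIFIER_ALL;   /* MAP | UNMAP */
    memory_region_register_iommu_notifier(iommu_mr, &my_notifier);
}
```

A consumer interested only in invalidations would pass IOMMU_NOTIFIER_UNMAP instead; whenever the aggregate set of registered flags changes, the region's notify_flag_changed callback (which replaces notify_started/notify_stopped above) is invoked with the old and new masks.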
diff --git a/include/exec/tb-context.h b/include/exec/tb-context.h
index dce95d92d6..c7f17f26e0 100644
--- a/include/exec/tb-context.h
+++ b/include/exec/tb-context.h
@@ -38,7 +38,7 @@ struct TBContext {
     QemuMutex tb_lock;
 
     /* statistics */
-    int tb_flush_count;
+    unsigned tb_flush_count;
     int tb_phys_invalidate_count;
 };
diff --git a/include/hw/compat.h b/include/hw/compat.h
index a1d6694492..46412b229a 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -6,6 +6,10 @@
         .driver   = "virtio-pci",\
         .property = "page-per-vq",\
         .value    = "on",\
+    },{\
+        .driver   = "ioapic",\
+        .property = "version",\
+        .value    = "0x11",\
     },
 
 #define HW_COMPAT_2_6 \
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 94dfae387a..c17602e533 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -93,7 +93,7 @@ typedef struct VFIOGuestIOMMU {
     VFIOContainer *container;
     MemoryRegion *iommu;
    hwaddr iommu_offset;
-    Notifier n;
+    IOMMUNotifier n;
     QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
 } VFIOGuestIOMMU;
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index 338d3a65b3..157698bfa9 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -1,4 +1,8 @@
-/* public domain */
+/* compiler.h: macros to abstract away compiler specifics
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
 
 #ifndef COMPILER_H
 #define COMPILER_H
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index ce0c406f27..22b54d6d93 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -232,13 +232,8 @@ struct kvm_run;
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 
 /* work queue */
-struct qemu_work_item {
-    struct qemu_work_item *next;
-    void (*func)(void *data);
-    void *data;
-    int done;
-    bool free;
-};
+typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);
+struct qemu_work_item;
 
 /**
  * CPUState:
@@ -247,7 +242,9 @@ struct qemu_work_item {
  * @nr_threads: Number of threads within this CPU.
  * @numa_node: NUMA node this CPU is belonging to.
  * @host_tid: Host thread ID.
- * @running: #true if CPU is currently running (usermode).
+ * @running: #true if CPU is currently running (lockless).
+ * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
+ *              valid under cpu_list_lock.
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
@@ -257,7 +254,6 @@ struct qemu_work_item {
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 *           CPU and return to its top level loop.
- * @tb_flushed: Indicates the translation buffer has been flushed.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
@@ -301,7 +297,7 @@ struct CPUState {
 #endif
     int thread_id;
     uint32_t host_tid;
-    bool running;
+    bool running, has_waiter;
     struct QemuCond *halt_cond;
     bool thread_kicked;
     bool created;
@@ -310,7 +306,6 @@ struct CPUState {
     bool unplug;
     bool crash_occurred;
     bool exit_request;
-    bool tb_flushed;
     uint32_t interrupt_request;
     int singlestep_enabled;
     int64_t icount_extra;
@@ -543,6 +538,18 @@ static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
 #endif
 
 /**
+ * cpu_list_add:
+ * @cpu: The CPU to be added to the list of CPUs.
+ */
+void cpu_list_add(CPUState *cpu);
+
+/**
+ * cpu_list_remove:
+ * @cpu: The CPU to be removed from the list of CPUs.
+ */
+void cpu_list_remove(CPUState *cpu);
+
+/**
  * cpu_reset:
  * @cpu: The CPU whose state is to be reset.
  */
@@ -616,6 +623,18 @@ void qemu_cpu_kick(CPUState *cpu);
 bool cpu_is_stopped(CPUState *cpu);
 
 /**
+ * do_run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ * @mutex: Mutex to release while waiting for @func to run.
+ *
+ * Used internally in the implementation of run_on_cpu.
+ */
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+                   QemuMutex *mutex);
+
+/**
  * run_on_cpu:
  * @cpu: The vCPU to run on.
  * @func: The function to be executed.
  * @data: Data to pass to the function.
  *
  * Schedules the function @func for execution on the vCPU @cpu.
  */
-void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
 
 /**
  * async_run_on_cpu:
@@ -633,7 +652,21 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
  *
  * Schedules the function @func for execution on the vCPU @cpu asynchronously.
  */
-void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+
+/**
+ * async_safe_run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu asynchronously,
+ * while all other vCPUs are sleeping.
+ *
+ * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
+ * BQL.
+ */
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
 
 /**
  * qemu_get_cpu:
@@ -794,6 +827,49 @@ void cpu_remove(CPUState *cpu);
 void cpu_remove_sync(CPUState *cpu);
 
 /**
+ * process_queued_cpu_work() - process all items on CPU work queue
+ * @cpu: The CPU which work queue to process.
+ */
+void process_queued_cpu_work(CPUState *cpu);
+
+/**
+ * cpu_exec_start:
+ * @cpu: The CPU for the current thread.
+ *
+ * Record that a CPU has started execution and can be interrupted with
+ * cpu_exit.
+ */
+void cpu_exec_start(CPUState *cpu);
+
+/**
+ * cpu_exec_end:
+ * @cpu: The CPU for the current thread.
+ *
+ * Record that a CPU has stopped execution and exclusive sections
+ * can be executed without interrupting it.
+ */
+void cpu_exec_end(CPUState *cpu);
+
+/**
+ * start_exclusive:
+ *
+ * Wait for a concurrent exclusive section to end, and then start
+ * a section of work that is run while other CPUs are not running
+ * between cpu_exec_start and cpu_exec_end.  CPUs that are running
+ * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
+ * during the exclusive section go to sleep until this CPU calls
+ * end_exclusive.
+ */
+void start_exclusive(void);
+
+/**
+ * end_exclusive:
+ *
+ * Concludes an exclusive execution section started by start_exclusive.
+ */
+void end_exclusive(void);
+
+/**
  * qemu_init_vcpu:
  * @cpu: The vCPU to initialize.
  *
diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h
index 0a88393d2b..f80d6d28e8 100644
--- a/include/sysemu/replay.h
+++ b/include/sysemu/replay.h
@@ -105,6 +105,8 @@ bool replay_checkpoint(ReplayCheckpoint checkpoint);
 /*! Disables storing events in the queue */
 void replay_disable_events(void);
+/*! Enables storing events in the queue */
+void replay_enable_events(void);
 /*! Returns true when saving events is enabled */
 bool replay_events_enabled(void);
 /*! Adds bottom half event to the queue */
@@ -115,6 +117,8 @@ void replay_input_event(QemuConsole *src, InputEvent *evt);
 void replay_input_sync_event(void);
 /*! Adds block layer event to the queue */
 void replay_block_event(QEMUBH *bh, uint64_t id);
+/*! Returns ID for the next block event */
+uint64_t blkreplay_next_id(void);
 
 /* Character device */
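The new cpu_exec_start()/cpu_exec_end() and start_exclusive()/end_exclusive() declarations in include/qom/cpu.h above document the exclusive-section protocol that the cpus-common series moves out of linux-user. A minimal sketch of the intended call pattern, assuming only those declarations; both functions below are illustrative, not QEMU code:

```c
#include "qemu/osdep.h"
#include "qom/cpu.h"    /* cpu_exec_start/end, start_exclusive/end_exclusive */

/* Illustrative vCPU loop: bracket guest execution so that exclusive
 * work can kick this CPU out and wait for it to stop. */
static void vcpu_step(CPUState *cpu)
{
    cpu_exec_start(cpu);
    /* ... enter guest code; cpu_exit() may interrupt it here ... */
    cpu_exec_end(cpu);
}

/* Illustrative exclusive section: runs only while no CPU is between
 * cpu_exec_start() and cpu_exec_end(). */
static void do_exclusive_work(void)
{
    start_exclusive();
    /* ... mutate global translation state, patch code, etc. ... */
    end_exclusive();
}
```

The shortlog entry "docs: include formal model for TCG exclusive sections" adds a formal (Promela) model of this same protocol for reference.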