Diffstat (limited to 'include')
83 files changed, 1619 insertions, 621 deletions
diff --git a/include/block/aio.h b/include/block/aio.h index ca551e346f..7df271d2b9 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -44,6 +44,7 @@ void qemu_aio_ref(void *p); typedef struct AioHandler AioHandler; typedef void QEMUBHFunc(void *opaque); +typedef bool AioPollFn(void *opaque); typedef void IOHandler(void *opaque); struct ThreadPool; @@ -52,18 +53,12 @@ struct LinuxAioState; struct AioContext { GSource source; - /* Protects all fields from multi-threaded access */ + /* Used by AioContext users to protect from multi-threaded access. */ QemuRecMutex lock; - /* The list of registered AIO handlers */ + /* The list of registered AIO handlers. Protected by ctx->list_lock. */ QLIST_HEAD(, AioHandler) aio_handlers; - /* This is a simple lock used to protect the aio_handlers list. - * Specifically, it's used to ensure that no callbacks are removed while - * we're walking and dispatching callbacks. - */ - int walking_handlers; - /* Used to avoid unnecessary event_notifier_set calls in aio_notify; * accessed with atomic primitives. If this field is 0, everything * (file descriptors, bottom halves, timers) will be re-evaluated @@ -89,17 +84,15 @@ struct AioContext { */ uint32_t notify_me; - /* lock to protect between bh's adders and deleter */ - QemuMutex bh_lock; + /* A lock to protect between QEMUBH and AioHandler adders and deleter, + * and to ensure that no callbacks are removed while we're walking and + * dispatching them. + */ + QemuLockCnt list_lock; /* Anchor of the list of Bottom Halves belonging to the context */ struct QEMUBH *first_bh; - /* A simple lock used to protect the first_bh list, and ensure that - * no callbacks are removed while we're walking and dispatching callbacks. - */ - int walking_bh; - /* Used by aio_notify. * * "notified" is used to avoid expensive event_notifier_test_and_clear @@ -115,7 +108,9 @@ struct AioContext { bool notified; EventNotifier notifier; - /* Thread pool for performing work and receiving completion callbacks */ + /* Thread pool for performing work and receiving completion callbacks. + * Has its own locking. + */ struct ThreadPool *thread_pool; #ifdef CONFIG_LINUX_AIO @@ -125,11 +120,25 @@ struct AioContext { struct LinuxAioState *linux_aio; #endif - /* TimerLists for calling timers - one per clock type */ + /* TimerLists for calling timers - one per clock type. Has its own + * locking. + */ QEMUTimerListGroup tlg; int external_disable_cnt; + /* Number of AioHandlers without .io_poll() */ + int poll_disable_cnt; + + /* Polling mode parameters */ + int64_t poll_ns; /* current polling time in nanoseconds */ + int64_t poll_max_ns; /* maximum polling time in nanoseconds */ + int64_t poll_grow; /* polling time growth factor */ + int64_t poll_shrink; /* polling time shrink factor */ + + /* Are we in polling mode or monitoring file descriptors? */ + bool poll_started; + /* epoll(7) state used when built with CONFIG_EPOLL */ int epollfd; bool epoll_enabled; @@ -167,9 +176,11 @@ void aio_context_unref(AioContext *ctx); * automatically takes care of calling aio_context_acquire and * aio_context_release. * - * Access to timers and BHs from a thread that has not acquired AioContext - * is possible. Access to callbacks for now must be done while the AioContext - * is owned by the thread (FIXME). + * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A + * thread still has to call those to avoid being interrupted by the guest. 
+ * + * Bottom halves, timers and callbacks can be created or removed without + * acquiring the AioContext. */ void aio_context_acquire(AioContext *ctx); @@ -295,8 +306,12 @@ bool aio_pending(AioContext *ctx); /* Dispatch any pending callbacks from the GSource attached to the AioContext. * * This is used internally in the implementation of the GSource. + * + * @dispatch_fds: true to process fds, false to skip them + * (can be used as an optimization by callers that know there + * are no fds ready) */ -bool aio_dispatch(AioContext *ctx); +bool aio_dispatch(AioContext *ctx, bool dispatch_fds); /* Progress in completing AIO work to occur. This can issue new pending * aio as a result of executing I/O completion or bh callbacks. @@ -325,8 +340,17 @@ void aio_set_fd_handler(AioContext *ctx, bool is_external, IOHandler *io_read, IOHandler *io_write, + AioPollFn *io_poll, void *opaque); +/* Set polling begin/end callbacks for a file descriptor that has already been + * registered with aio_set_fd_handler. Do nothing if the file descriptor is + * not registered. + */ +void aio_set_fd_poll(AioContext *ctx, int fd, + IOHandler *io_poll_begin, + IOHandler *io_poll_end); + /* Register an event notifier and associated callbacks. Behaves very similarly * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks * will be invoked when using aio_poll(). @@ -337,7 +361,17 @@ void aio_set_fd_handler(AioContext *ctx, void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, bool is_external, - EventNotifierHandler *io_read); + EventNotifierHandler *io_read, + AioPollFn *io_poll); + +/* Set polling begin/end callbacks for an event notifier that has already been + * registered with aio_set_event_notifier. Do nothing if the event notifier is + * not registered. + */ +void aio_set_event_notifier_poll(AioContext *ctx, + EventNotifier *notifier, + EventNotifierHandler *io_poll_begin, + EventNotifierHandler *io_poll_end); /* Return a GSource that lets the main loop poll the file descriptors attached * to this AioContext. @@ -474,4 +508,17 @@ static inline bool aio_context_in_iothread(AioContext *ctx) */ void aio_context_setup(AioContext *ctx); +/** + * aio_context_set_poll_params: + * @ctx: the aio context + * @max_ns: how long to busy poll for, in nanoseconds + * @grow: polling time growth factor + * @shrink: polling time shrink factor + * + * Poll mode can be disabled by setting poll_max_ns to 0. + */ +void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, + int64_t grow, int64_t shrink, + Error **errp); + #endif diff --git a/include/block/block.h b/include/block/block.h index 49bb0b239a..8b0dcdaa70 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -526,8 +526,6 @@ int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); void bdrv_io_plug(BlockDriverState *bs); void bdrv_io_unplug(BlockDriverState *bs); -void bdrv_io_unplugged_begin(BlockDriverState *bs); -void bdrv_io_unplugged_end(BlockDriverState *bs); /** * bdrv_drained_begin: diff --git a/include/block/block_int.h b/include/block/block_int.h index 83a423c580..2d92d7edfe 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -184,7 +184,7 @@ struct BlockDriver { /* * Flushes all data that was already written to the OS all the way down to - * the disk (for example raw-posix calls fsync()). + * the disk (for example file-posix.c calls fsync()). 
*/ int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs); @@ -526,9 +526,8 @@ struct BlockDriverState { uint64_t write_threshold_offset; NotifierWithReturn write_threshold_notifier; - /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */ + /* counter for nested bdrv_io_plug */ unsigned io_plugged; - unsigned io_plug_disabled; int quiesce_counter; }; diff --git a/include/disas/bfd.h b/include/disas/bfd.h index 8a3488c2c5..0435b8c9f9 100644 --- a/include/disas/bfd.h +++ b/include/disas/bfd.h @@ -222,6 +222,10 @@ enum bfd_architecture bfd_arch_ia64, /* HP/Intel ia64 */ #define bfd_mach_ia64_elf64 64 #define bfd_mach_ia64_elf32 32 + bfd_arch_nios2, /* Nios II */ +#define bfd_mach_nios2 0 +#define bfd_mach_nios2r1 1 +#define bfd_mach_nios2r2 2 bfd_arch_lm32, /* Lattice Mico32 */ #define bfd_mach_lm32 1 bfd_arch_last @@ -415,6 +419,8 @@ int print_insn_crisv10 (bfd_vma, disassemble_info*); int print_insn_microblaze (bfd_vma, disassemble_info*); int print_insn_ia64 (bfd_vma, disassemble_info*); int print_insn_lm32 (bfd_vma, disassemble_info*); +int print_insn_big_nios2 (bfd_vma, disassemble_info*); +int print_insn_little_nios2 (bfd_vma, disassemble_info*); #if 0 /* Fetch the disassembler for a given BFD, if that support is available. */ diff --git a/include/elf.h b/include/elf.h index 1c2975dc82..0dbd3e968b 100644 --- a/include/elf.h +++ b/include/elf.h @@ -126,6 +126,8 @@ typedef int64_t Elf64_Sxword; */ #define EM_S390_OLD 0xA390 +#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ + #define EM_MICROBLAZE 189 #define EM_MICROBLAZE_OLD 0xBAAB diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index a8c13cee66..bbc9478a50 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -95,15 +95,13 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr); /** * tlb_flush: * @cpu: CPU whose TLB should be flushed - * @flush_global: ignored * - * Flush the entire TLB for the specified CPU. - * The flush_global flag is in theory an indicator of whether the whole - * TLB should be flushed, or only those entries not marked global. - * In practice QEMU does not implement any global/not global flag for - * TLB entries, and the argument is ignored. + * Flush the entire TLB for the specified CPU. Most CPU architectures + * allow the implementation to drop entries from the TLB at any time + * so this is generally safe. If more selective flushing is required + * use one of the other functions for efficiency. 
*/ -void tlb_flush(CPUState *cpu, int flush_global); +void tlb_flush(CPUState *cpu); /** * tlb_flush_page_by_mmuidx: * @cpu: CPU whose TLB should be flushed @@ -165,7 +163,7 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) { } -static inline void tlb_flush(CPUState *cpu, int flush_global) +static inline void tlb_flush(CPUState *cpu) { } diff --git a/include/exec/memory.h b/include/exec/memory.h index 64560f61b4..a10044f08f 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -16,16 +16,12 @@ #ifndef CONFIG_USER_ONLY -#define DIRTY_MEMORY_VGA 0 -#define DIRTY_MEMORY_CODE 1 -#define DIRTY_MEMORY_MIGRATION 2 -#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */ - #include "exec/cpu-common.h" #ifndef CONFIG_USER_ONLY #include "exec/hwaddr.h" #endif #include "exec/memattrs.h" +#include "exec/ramlist.h" #include "qemu/queue.h" #include "qemu/int128.h" #include "qemu/notify.h" @@ -628,6 +624,9 @@ static inline bool memory_region_is_romd(MemoryRegion *mr) */ static inline bool memory_region_is_iommu(MemoryRegion *mr) { + if (mr->alias) { + return memory_region_is_iommu(mr->alias); + } return mr->iommu_ops; } @@ -1537,6 +1536,11 @@ void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val); void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val); +/* address_space_get_iotlb_entry: translate an address into an IOTLB + * entry. Should be called from an RCU critical section. + */ +IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, + bool is_write); /* address_space_translate: translate an address range into an address space * into a MemoryRegion and an address range into that section. Should be diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 54d7108a9e..3e79466a44 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -21,6 +21,7 @@ #ifndef CONFIG_USER_ONLY #include "hw/xen/xen.h" +#include "exec/ramlist.h" struct RAMBlock { struct rcu_head rcu; @@ -35,6 +36,7 @@ struct RAMBlock { char idstr[256]; /* RCU-enabled, writes protected by the ramlist lock */ QLIST_ENTRY(RAMBlock) next; + QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers; int fd; size_t page_size; }; @@ -50,51 +52,7 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset) return (char *)block->host + offset; } -/* The dirty memory bitmap is split into fixed-size blocks to allow growth - * under RCU. The bitmap for a block can be accessed as follows: - * - * rcu_read_lock(); - * - * DirtyMemoryBlocks *blocks = - * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]); - * - * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE; - * unsigned long *block = blocks.blocks[idx]; - * ...access block bitmap... - * - * rcu_read_unlock(); - * - * Remember to check for the end of the block when accessing a range of - * addresses. Move on to the next block if you reach the end. - * - * Organization into blocks allows dirty memory to grow (but not shrink) under - * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new - * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept - * the same. Other threads can safely access existing blocks while dirty - * memory is being grown. 
When no threads are using the old DirtyMemoryBlocks - * anymore it is freed by RCU (but the underlying blocks stay because they are - * pointed to from the new DirtyMemoryBlocks). - */ -#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8) -typedef struct { - struct rcu_head rcu; - unsigned long *blocks[]; -} DirtyMemoryBlocks; - -typedef struct RAMList { - QemuMutex mutex; - RAMBlock *mru_block; - /* RCU-enabled, writes protected by the ramlist lock. */ - QLIST_HEAD(, RAMBlock) blocks; - DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM]; - uint32_t version; -} RAMList; -extern RAMList ram_list; - ram_addr_t last_ram_offset(void); -void qemu_mutex_lock_ramlist(void); -void qemu_mutex_unlock_ramlist(void); - RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, bool share, const char *mem_path, Error **errp); diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h new file mode 100644 index 0000000000..c59880de82 --- /dev/null +++ b/include/exec/ramlist.h @@ -0,0 +1,72 @@ +#ifndef RAMLIST_H +#define RAMLIST_H + +#include "qemu/queue.h" +#include "qemu/thread.h" +#include "qemu/rcu.h" + +typedef struct RAMBlockNotifier RAMBlockNotifier; + +#define DIRTY_MEMORY_VGA 0 +#define DIRTY_MEMORY_CODE 1 +#define DIRTY_MEMORY_MIGRATION 2 +#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */ + +/* The dirty memory bitmap is split into fixed-size blocks to allow growth + * under RCU. The bitmap for a block can be accessed as follows: + * + * rcu_read_lock(); + * + * DirtyMemoryBlocks *blocks = + * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]); + * + * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE; + * unsigned long *block = blocks.blocks[idx]; + * ...access block bitmap... + * + * rcu_read_unlock(); + * + * Remember to check for the end of the block when accessing a range of + * addresses. Move on to the next block if you reach the end. + * + * Organization into blocks allows dirty memory to grow (but not shrink) under + * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new + * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept + * the same. Other threads can safely access existing blocks while dirty + * memory is being grown. When no threads are using the old DirtyMemoryBlocks + * anymore it is freed by RCU (but the underlying blocks stay because they are + * pointed to from the new DirtyMemoryBlocks). + */ +#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8) +typedef struct { + struct rcu_head rcu; + unsigned long *blocks[]; +} DirtyMemoryBlocks; + +typedef struct RAMList { + QemuMutex mutex; + RAMBlock *mru_block; + /* RCU-enabled, writes protected by the ramlist lock. 
*/ + QLIST_HEAD(, RAMBlock) blocks; + DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM]; + uint32_t version; + QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers; +} RAMList; +extern RAMList ram_list; + +void qemu_mutex_lock_ramlist(void); +void qemu_mutex_unlock_ramlist(void); + +struct RAMBlockNotifier { + void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size); + void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size); + QLIST_ENTRY(RAMBlockNotifier) next; +}; + +void ram_block_notifier_add(RAMBlockNotifier *n); +void ram_block_notifier_remove(RAMBlockNotifier *n); +void ram_block_notify_add(void *host, size_t size); +void ram_block_notify_remove(void *host, size_t size); + + +#endif /* RAMLIST_H */ diff --git a/include/glib-compat.h b/include/glib-compat.h index acf254d2a0..0cd24ffbe9 100644 --- a/include/glib-compat.h +++ b/include/glib-compat.h @@ -39,7 +39,7 @@ static inline gint64 qemu_g_get_monotonic_time(void) #define g_get_monotonic_time() qemu_g_get_monotonic_time() #endif -#ifdef _WIN32 +#if defined(_WIN32) && !GLIB_CHECK_VERSION(2, 50, 0) /* * g_poll has a problem on Windows when using * timeouts < 10ms, so use wrapper. diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h index 154f3b82f6..4cc3630e61 100644 --- a/include/hw/acpi/acpi-defs.h +++ b/include/hw/acpi/acpi-defs.h @@ -191,10 +191,8 @@ struct AcpiFadtDescriptorRev5_1 { typedef struct AcpiFadtDescriptorRev5_1 AcpiFadtDescriptorRev5_1; -enum { - ACPI_FADT_ARM_USE_PSCI_G_0_2 = 0, - ACPI_FADT_ARM_PSCI_USE_HVC = 1, -}; +#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0) +#define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1) /* * Serial Port Console Redirection Table (SPCR), Rev. 1.02 @@ -290,7 +288,7 @@ typedef struct AcpiMultipleApicTable AcpiMultipleApicTable; #define ACPI_APIC_XRUPT_SOURCE 8 #define ACPI_APIC_LOCAL_X2APIC 9 #define ACPI_APIC_LOCAL_X2APIC_NMI 10 -#define ACPI_APIC_GENERIC_INTERRUPT 11 +#define ACPI_APIC_GENERIC_CPU_INTERFACE 11 #define ACPI_APIC_GENERIC_DISTRIBUTOR 12 #define ACPI_APIC_GENERIC_MSI_FRAME 13 #define ACPI_APIC_GENERIC_REDISTRIBUTOR 14 @@ -361,7 +359,7 @@ struct AcpiMadtLocalX2ApicNmi { } QEMU_PACKED; typedef struct AcpiMadtLocalX2ApicNmi AcpiMadtLocalX2ApicNmi; -struct AcpiMadtGenericInterrupt { +struct AcpiMadtGenericCpuInterface { ACPI_SUB_HEADER_DEF uint16_t reserved; uint32_t cpu_interface_number; @@ -378,7 +376,10 @@ struct AcpiMadtGenericInterrupt { uint64_t arm_mpidr; } QEMU_PACKED; -typedef struct AcpiMadtGenericInterrupt AcpiMadtGenericInterrupt; +typedef struct AcpiMadtGenericCpuInterface AcpiMadtGenericCpuInterface; + +/* GICC CPU Interface Flags */ +#define ACPI_MADT_GICC_ENABLED 1 struct AcpiMadtGenericDistributor { ACPI_SUB_HEADER_DEF @@ -427,21 +428,9 @@ typedef struct AcpiMadtGenericTranslator AcpiMadtGenericTranslator; /* * Generic Timer Description Table (GTDT) */ - -#define ACPI_GTDT_INTERRUPT_MODE (1 << 0) -#define ACPI_GTDT_INTERRUPT_POLARITY (1 << 1) -#define ACPI_GTDT_ALWAYS_ON (1 << 2) - -/* Triggering */ - -#define ACPI_LEVEL_SENSITIVE ((uint8_t) 0x00) -#define ACPI_EDGE_SENSITIVE ((uint8_t) 0x01) - -/* Polarity */ - -#define ACPI_ACTIVE_HIGH ((uint8_t) 0x00) -#define ACPI_ACTIVE_LOW ((uint8_t) 0x01) -#define ACPI_ACTIVE_BOTH ((uint8_t) 0x02) +#define ACPI_GTDT_INTERRUPT_MODE_LEVEL (0 << 0) +#define ACPI_GTDT_INTERRUPT_MODE_EDGE (1 << 0) +#define ACPI_GTDT_CAP_ALWAYS_ON (1 << 2) struct AcpiGenericTimerTable { ACPI_TABLE_HEADER_DEF @@ -638,8 +627,20 @@ struct AcpiDmarHardwareUnit { } QEMU_PACKED; typedef struct AcpiDmarHardwareUnit 
AcpiDmarHardwareUnit; +/* Type 2: Root Port ATS Capability Reporting Structure */ +struct AcpiDmarRootPortATS { + uint16_t type; + uint16_t length; + uint8_t flags; + uint8_t reserved; + uint16_t pci_segment; + AcpiDmarDeviceScope scope[0]; +} QEMU_PACKED; +typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS; + /* Masks for Flags field above */ #define ACPI_DMAR_INCLUDE_PCI_ALL 1 +#define ACPI_DMAR_ATSR_ALL_PORTS 1 /* * Input Output Remapping Table (IORT) diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h index 901a4ae876..71d3c48e7d 100644 --- a/include/hw/acpi/acpi_dev_interface.h +++ b/include/hw/acpi/acpi_dev_interface.h @@ -57,6 +57,6 @@ typedef struct AcpiDeviceIfClass { void (*ospm_status)(AcpiDeviceIf *adev, ACPIOSTInfoList ***list); void (*send_event)(AcpiDeviceIf *adev, AcpiEventStatusBits ev); void (*madt_cpu)(AcpiDeviceIf *adev, int uid, - CPUArchIdList *apic_ids, GArray *entry); + const CPUArchIdList *apic_ids, GArray *entry); } AcpiDeviceIfClass; #endif diff --git a/include/hw/acpi/memory_hotplug.h b/include/hw/acpi/memory_hotplug.h index d2c7452397..db8ebc9cea 100644 --- a/include/hw/acpi/memory_hotplug.h +++ b/include/hw/acpi/memory_hotplug.h @@ -30,7 +30,7 @@ typedef struct MemHotplugState { } MemHotplugState; void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner, - MemHotplugState *state); + MemHotplugState *state, uint16_t io_base); void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, DeviceState *dev, Error **errp); @@ -47,11 +47,7 @@ extern const VMStateDescription vmstate_memory_hotplug; void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list); -#define MEMORY_HOTPLUG_DEVICE "MHPD" -#define MEMORY_SLOT_SCAN_METHOD "MSCN" -#define MEMORY_HOTPLUG_HANDLER_PATH "\\_SB.PCI0." \ - MEMORY_HOTPLUG_DEVICE "." 
MEMORY_SLOT_SCAN_METHOD - -void build_memory_hotplug_aml(Aml *ctx, uint32_t nr_mem, - uint16_t io_base, uint16_t io_len); +void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem, + const char *res_root, + const char *event_handler_method); #endif diff --git a/include/hw/acpi/pc-hotplug.h b/include/hw/acpi/pc-hotplug.h index 6a8d268f84..31bc9191c3 100644 --- a/include/hw/acpi/pc-hotplug.h +++ b/include/hw/acpi/pc-hotplug.h @@ -29,29 +29,6 @@ #define PIIX4_CPU_HOTPLUG_IO_BASE 0xaf00 #define CPU_HOTPLUG_RESOURCE_DEVICE PRES -#define ACPI_MEMORY_HOTPLUG_IO_LEN 24 #define ACPI_MEMORY_HOTPLUG_BASE 0x0a00 -#define MEMORY_SLOTS_NUMBER "MDNR" -#define MEMORY_HOTPLUG_IO_REGION "HPMR" -#define MEMORY_SLOT_ADDR_LOW "MRBL" -#define MEMORY_SLOT_ADDR_HIGH "MRBH" -#define MEMORY_SLOT_SIZE_LOW "MRLL" -#define MEMORY_SLOT_SIZE_HIGH "MRLH" -#define MEMORY_SLOT_PROXIMITY "MPX" -#define MEMORY_SLOT_ENABLED "MES" -#define MEMORY_SLOT_INSERT_EVENT "MINS" -#define MEMORY_SLOT_REMOVE_EVENT "MRMV" -#define MEMORY_SLOT_EJECT "MEJ" -#define MEMORY_SLOT_SLECTOR "MSEL" -#define MEMORY_SLOT_OST_EVENT "MOEV" -#define MEMORY_SLOT_OST_STATUS "MOSC" -#define MEMORY_SLOT_LOCK "MLCK" -#define MEMORY_SLOT_STATUS_METHOD "MRST" -#define MEMORY_SLOT_CRS_METHOD "MCRS" -#define MEMORY_SLOT_OST_METHOD "MOST" -#define MEMORY_SLOT_PROXIMITY_METHOD "MPXM" -#define MEMORY_SLOT_EJECT_METHOD "MEJ0" -#define MEMORY_SLOT_NOTIFY_METHOD "MTFY" - #endif diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index 5406b498d7..1ab5deaa08 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -27,8 +27,9 @@ typedef struct AspeedSoCState { DeviceState parent; /*< public >*/ - ARMCPU *cpu; + ARMCPU cpu; MemoryRegion iomem; + MemoryRegion sram; AspeedVICState vic; AspeedTimerCtrlState timerctrl; AspeedI2CState i2c; @@ -46,6 +47,7 @@ typedef struct AspeedSoCInfo { const char *cpu_model; uint32_t silicon_rev; hwaddr sdram_base; + uint64_t sram_size; int spis_num; const hwaddr *spi_bases; const char *fmc_typename; diff --git a/include/hw/arm/virt-acpi-build.h b/include/hw/arm/virt-acpi-build.h deleted file mode 100644 index f5ec749b8f..0000000000 --- a/include/hw/arm/virt-acpi-build.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD. - * - * Author: Shannon Zhao <zhaoshenglong@huawei.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#ifndef QEMU_VIRT_ACPI_BUILD_H -#define QEMU_VIRT_ACPI_BUILD_H - -#include "qemu-common.h" -#include "hw/arm/virt.h" -#include "qemu/notify.h" - -#define ACPI_GICC_ENABLED 1 - -typedef struct VirtGuestInfo { - int smp_cpus; - FWCfgState *fw_cfg; - const MemMapEntry *memmap; - const int *irqmap; - bool use_highmem; - int gic_version; - bool no_its; -} VirtGuestInfo; - - -typedef struct VirtGuestInfoState { - VirtGuestInfo info; - Notifier machine_done; -} VirtGuestInfoState; - -void virt_acpi_setup(VirtGuestInfo *guest_info); - -#endif diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index 9650193253..58ce74e0e5 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -32,10 +32,15 @@ #include "qemu-common.h" #include "exec/hwaddr.h" +#include "qemu/notify.h" +#include "hw/boards.h" +#include "hw/arm/arm.h" #define NUM_GICV2M_SPIS 64 #define NUM_VIRTIO_TRANSPORTS 32 +#define ARCH_GICV3_MAINT_IRQ 9 + #define ARCH_TIMER_VIRT_IRQ 11 #define ARCH_TIMER_S_EL1_IRQ 13 #define ARCH_TIMER_NS_EL1_IRQ 14 @@ -74,5 +79,42 @@ typedef struct MemMapEntry { hwaddr size; } MemMapEntry; +typedef struct { + MachineClass parent; + bool disallow_affinity_adjustment; + bool no_its; + bool no_pmu; + bool claim_edge_triggered_timers; +} VirtMachineClass; + +typedef struct { + MachineState parent; + Notifier machine_done; + FWCfgState *fw_cfg; + bool secure; + bool highmem; + bool virt; + int32_t gic_version; + struct arm_boot_info bootinfo; + const MemMapEntry *memmap; + const int *irqmap; + int smp_cpus; + void *fdt; + int fdt_size; + uint32_t clock_phandle; + uint32_t gic_phandle; + uint32_t msi_phandle; + int psci_conduit; +} VirtMachineState; + +#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt") +#define VIRT_MACHINE(obj) \ + OBJECT_CHECK(VirtMachineState, (obj), TYPE_VIRT_MACHINE) +#define VIRT_MACHINE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(VirtMachineClass, obj, TYPE_VIRT_MACHINE) +#define VIRT_MACHINE_CLASS(klass) \ + OBJECT_CLASS_CHECK(VirtMachineClass, klass, TYPE_VIRT_MACHINE) + +void virt_acpi_setup(VirtMachineState *vms); -#endif +#endif /* QEMU_ARM_VIRT_H */ diff --git a/include/hw/boards.h b/include/hw/boards.h index a51da9c440..ac891a828b 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -135,7 +135,7 @@ struct MachineClass { HotplugHandler *(*get_hotplug_handler)(MachineState *machine, DeviceState *dev); unsigned (*cpu_index_to_socket_id)(unsigned cpu_index); - CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine); + const CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine); HotpluggableCPUList *(*query_hotpluggable_cpus)(MachineState *machine); }; diff --git a/include/hw/compat.h b/include/hw/compat.h index 8dfc7a38c0..34e9b4a660 100644 --- a/include/hw/compat.h +++ b/include/hw/compat.h @@ -1,6 +1,17 @@ #ifndef HW_COMPAT_H #define HW_COMPAT_H +#define HW_COMPAT_2_8 \ + {\ + .driver = "fw_cfg_mem",\ + .property = "x-file-slots",\ + .value = stringify(0x10),\ + },{\ + .driver = "fw_cfg_io",\ + .property = "x-file-slots",\ + .value = stringify(0x10),\ + }, + #define HW_COMPAT_2_7 \ {\ .driver = "virtio-pci",\ diff --git a/include/hw/dma/xlnx_dpdma.h b/include/hw/dma/xlnx_dpdma.h index 664df28ae6..7a304a5bb4 100644 --- a/include/hw/dma/xlnx_dpdma.h +++ b/include/hw/dma/xlnx_dpdma.h @@ -53,7 +53,8 @@ typedef struct XlnxDPDMAState XlnxDPDMAState; * data to the buffer specified by * dpdma_set_host_data_location(). * - * Returns The number of bytes transfered by the DPDMA or 0 if an error occured. 
+ * Returns The number of bytes transferred by the DPDMA + * or 0 if an error occurred. * * @s The DPDMA state. * @channel The channel to start. diff --git a/include/hw/hw.h b/include/hw/hw.h index 3669ebd916..e22d4ce5fa 100644 --- a/include/hw/hw.h +++ b/include/hw/hw.h @@ -12,11 +12,7 @@ #include "hw/irq.h" #include "migration/vmstate.h" #include "qemu/module.h" - -typedef void QEMUResetHandler(void *opaque); - -void qemu_register_reset(QEMUResetHandler *func, void *opaque); -void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); +#include "sysemu/reset.h" void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2); diff --git a/include/hw/i2c/i2c.h b/include/hw/i2c/i2c.h index c4085aa366..2ce611d4c8 100644 --- a/include/hw/i2c/i2c.h +++ b/include/hw/i2c/i2c.h @@ -32,14 +32,22 @@ typedef struct I2CSlaveClass /* Callbacks provided by the device. */ int (*init)(I2CSlave *dev); - /* Master to slave. */ + /* Master to slave. Returns non-zero for a NAK, 0 for success. */ int (*send)(I2CSlave *s, uint8_t data); - /* Slave to master. */ + /* + * Slave to master. This cannot fail, the device should always + * return something here. Negative values probably result in 0xff + * and a possible log from the driver, and shouldn't be used. + */ int (*recv)(I2CSlave *s); - /* Notify the slave of a bus state change. */ - void (*event)(I2CSlave *s, enum i2c_event event); + /* + * Notify the slave of a bus state change. For start event, + * returns non-zero to NAK an operation. For other events the + * return code is not used and should be zero. + */ + int (*event)(I2CSlave *s, enum i2c_event event); } I2CSlaveClass; struct I2CSlave diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index b22e699c46..5a20c5e38e 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -361,7 +361,7 @@ uint16_t pvpanic_port(void); /* acpi-build.c */ void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, - CPUArchIdList *apic_ids, GArray *entry); + const CPUArchIdList *apic_ids, GArray *entry); /* e820 types */ #define E820_RAM 1 @@ -375,14 +375,15 @@ int e820_get_num_entries(void); bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *); #define PC_COMPAT_2_8 \ - -#define PC_COMPAT_2_7 \ - HW_COMPAT_2_7 \ + HW_COMPAT_2_8 \ {\ .driver = "kvmclock",\ .property = "x-mach-use-reliable-get-clock",\ .value = "off",\ },\ + +#define PC_COMPAT_2_7 \ + HW_COMPAT_2_7 \ {\ .driver = TYPE_X86_CPU,\ .property = "l3-cache",\ diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h index 0c89d9835b..361c07cdc6 100644 --- a/include/hw/i386/x86-iommu.h +++ b/include/hw/i386/x86-iommu.h @@ -73,6 +73,7 @@ typedef struct IEC_Notifier IEC_Notifier; struct X86IOMMUState { SysBusDevice busdev; bool intr_supported; /* Whether vIOMMU supports IR */ + bool dt_supported; /* Whether vIOMMU supports DT */ IommuType type; /* IOMMU type - AMD/Intel */ QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */ }; diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h index f4c349a2ef..af3ca18e2f 100644 --- a/include/hw/intc/arm_gic_common.h +++ b/include/hw/intc/arm_gic_common.h @@ -55,6 +55,8 @@ typedef struct GICState { qemu_irq parent_irq[GIC_NCPU]; qemu_irq parent_fiq[GIC_NCPU]; + qemu_irq parent_virq[GIC_NCPU]; + qemu_irq parent_vfiq[GIC_NCPU]; /* GICD_CTLR; for a GIC with the security extensions the NS banked version * of this register is just an alias of bit 1 of the S banked version. 
*/ diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index 341a3118f0..4156051d98 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -38,6 +38,9 @@ /* Number of SGI target-list bits */ #define GICV3_TARGETLIST_BITS 16 +/* Maximum number of list registers (architectural limit) */ +#define GICV3_LR_MAX 16 + /* Minimum BPR for Secure, or when security not enabled */ #define GIC_MIN_BPR 0 /* Minimum BPR for Nonsecure when security is enabled */ @@ -145,6 +148,9 @@ struct GICv3CPUState { CPUState *cpu; qemu_irq parent_irq; qemu_irq parent_fiq; + qemu_irq parent_virq; + qemu_irq parent_vfiq; + qemu_irq maintenance_irq; /* Redistributor */ uint32_t level; /* Current IRQ level */ @@ -173,6 +179,21 @@ struct GICv3CPUState { uint64_t icc_igrpen[3]; uint64_t icc_ctlr_el3; + /* Virtualization control interface */ + uint64_t ich_apr[3][4]; /* ich_apr[GICV3_G1][x] never used */ + uint64_t ich_hcr_el2; + uint64_t ich_lr_el2[GICV3_LR_MAX]; + uint64_t ich_vmcr_el2; + + /* Properties of the CPU interface. These are initialized from + * the settings in the CPU proper. + * If the number of implemented list registers is 0 then the + * virtualization support is not implemented. + */ + int num_list_regs; + int vpribits; /* number of virtual priority bits */ + int vprebits; /* number of virtual preemption bits */ + /* Current highest priority pending interrupt for this CPU. * This is cached information that can be recalculated from the * real state above; it doesn't need to be migrated. diff --git a/include/hw/loader.h b/include/hw/loader.h index 0c864cfd60..0dbd8d6bf3 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -180,7 +180,8 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, size_t max_len, hwaddr addr, const char *fw_file_name, FWCfgReadCallback fw_callback, - void *callback_opaque, AddressSpace *as); + void *callback_opaque, AddressSpace *as, + bool read_only); int rom_add_elf_program(const char *name, void *data, size_t datasize, size_t romsize, hwaddr addr, AddressSpace *as); int rom_check_and_register_reset(void); @@ -194,7 +195,7 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict); #define rom_add_file_fixed(_f, _a, _i) \ rom_add_file(_f, NULL, _a, _i, false, NULL, NULL) #define rom_add_blob_fixed(_f, _b, _l, _a) \ - rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, NULL) + rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, NULL, true) #define rom_add_file_mr(_f, _mr, _i) \ rom_add_file(_f, NULL, 0, _i, false, _mr, NULL) #define rom_add_file_as(_f, _as, _i) \ @@ -202,7 +203,7 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict); #define rom_add_file_fixed_as(_f, _a, _i, _as) \ rom_add_file(_f, NULL, _a, _i, false, NULL, _as) #define rom_add_blob_fixed_as(_f, _b, _l, _a, _as) \ - rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as) + rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as, true) #define PC_ROM_MIN_VGA 0xc0000 #define PC_ROM_MIN_OPTION 0xc8000 diff --git a/include/hw/m68k/mcf.h b/include/hw/m68k/mcf.h index fdae229502..bf43998d9b 100644 --- a/include/hw/m68k/mcf.h +++ b/include/hw/m68k/mcf.h @@ -21,10 +21,6 @@ qemu_irq *mcf_intc_init(struct MemoryRegion *sysmem, hwaddr base, M68kCPU *cpu); -/* mcf_fec.c */ -void mcf_fec_init(struct MemoryRegion *sysmem, NICInfo *nd, - hwaddr base, qemu_irq *irq); - /* mcf5206.c */ qemu_irq *mcf5206_init(struct MemoryRegion *sysmem, uint32_t base, M68kCPU *cpu); diff --git a/include/hw/m68k/mcf_fec.h b/include/hw/m68k/mcf_fec.h 
new file mode 100644 index 0000000000..7f029f7b59 --- /dev/null +++ b/include/hw/m68k/mcf_fec.h @@ -0,0 +1,13 @@ +/* + * Definitions for the ColdFire Fast Ethernet Controller emulation. + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#define TYPE_MCF_FEC_NET "mcf-fec" +#define MCF_FEC_NET(obj) OBJECT_CHECK(mcf_fec_state, (obj), TYPE_MCF_FEC_NET) + +#define FEC_NUM_IRQ 13 diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h index 14ffc43de8..bd4ac013f9 100644 --- a/include/hw/misc/aspeed_scu.h +++ b/include/hw/misc/aspeed_scu.h @@ -32,6 +32,7 @@ typedef struct AspeedSCUState { } AspeedSCUState; #define AST2400_A0_SILICON_REV 0x02000303U +#define AST2400_A1_SILICON_REV 0x02010303U #define AST2500_A0_SILICON_REV 0x04000303U #define AST2500_A1_SILICON_REV 0x04010303U diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index 5c27a1f0d5..b980cbaebf 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -136,6 +136,7 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data, * @callback_opaque: argument to be passed into callback function * @data: pointer to start of item data * @len: size of item data + * @read_only: is file read only * * Add a new NAMED fw_cfg item as a raw "blob" of the given size. The data * referenced by the starting pointer is only linked, NOT copied, into the @@ -151,7 +152,7 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data, */ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, FWCfgReadCallback callback, void *callback_opaque, - void *data, size_t len); + void *data, size_t len, bool read_only); /** * fw_cfg_modify_file: diff --git a/include/hw/nvram/fw_cfg_keys.h b/include/hw/nvram/fw_cfg_keys.h index 0f3e871884..b6919451f5 100644 --- a/include/hw/nvram/fw_cfg_keys.h +++ b/include/hw/nvram/fw_cfg_keys.h @@ -29,8 +29,7 @@ #define FW_CFG_FILE_DIR 0x19 #define FW_CFG_FILE_FIRST 0x20 -#define FW_CFG_FILE_SLOTS 0x10 -#define FW_CFG_MAX_ENTRY (FW_CFG_FILE_FIRST + FW_CFG_FILE_SLOTS) +#define FW_CFG_FILE_SLOTS_MIN 0x10 #define FW_CFG_WRITE_CHANNEL 0x4000 #define FW_CFG_ARCH_LOCAL 0x8000 diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h index 94486fdd37..53b6760c16 100644 --- a/include/hw/pci-host/q35.h +++ b/include/hw/pci-host/q35.h @@ -180,7 +180,7 @@ typedef struct Q35PCIHost { uint64_t mch_mcfg_base(void); /* - * Arbitary but unique BNF number for IOAPIC device. + * Arbitrary but unique BNF number for IOAPIC device. 
* * TODO: make sure there would have no conflict with real PCI bus */ diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 056d25e53c..163c5195b6 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -74,6 +74,9 @@ struct PCIExpressDevice { /* AER */ uint16_t aer_cap; PCIEAERLog aer_log; + + /* Offset of ATS capability in config space */ + uint16_t ats_cap; }; #define COMPAT_PROP_PCP "power_controller_present" @@ -120,16 +123,7 @@ void pcie_add_capability(PCIDevice *dev, void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn); void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num); - -extern const VMStateDescription vmstate_pcie_device; - -#define VMSTATE_PCIE_DEVICE(_field, _state) { \ - .name = (stringify(_field)), \ - .size = sizeof(PCIDevice), \ - .vmsd = &vmstate_pcie_device, \ - .flags = VMS_STRUCT, \ - .offset = vmstate_offset_value(_state, _field, PCIDevice), \ -} +void pcie_ats_init(PCIDevice *dev, uint16_t offset); void pcie_cap_slot_hotplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); diff --git a/include/hw/pci/pcie_aer.h b/include/hw/pci/pcie_aer.h index c2ee4e2bdb..526802bd31 100644 --- a/include/hw/pci/pcie_aer.h +++ b/include/hw/pci/pcie_aer.h @@ -44,7 +44,6 @@ struct PCIEAERLog { */ #define PCIE_AER_LOG_MAX_DEFAULT 8 #define PCIE_AER_LOG_MAX_LIMIT 128 -#define PCIE_AER_LOG_MAX_UNSET 0xffff uint16_t log_max; /* Error log. log_max-sized array */ @@ -87,7 +86,8 @@ struct PCIEAERErr { extern const VMStateDescription vmstate_pcie_aer_log; -int pcie_aer_init(PCIDevice *dev, uint16_t offset, uint16_t size); +int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset, + uint16_t size, Error **errp); void pcie_aer_exit(PCIDevice *dev); void pcie_aer_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len); diff --git a/include/hw/register.h b/include/hw/register.h index 8c12233b75..5b6dc32091 100644 --- a/include/hw/register.h +++ b/include/hw/register.h @@ -92,7 +92,7 @@ struct RegisterInfo { * This structure is used to group all of the individual registers which are * modeled using the RegisterInfo structure. * - * @r is an aray containing of all the relevent RegisterInfo structures. + * @r is an array containing of all the relevant RegisterInfo structures. 
* * @num_elements is the number of elements in the array r * diff --git a/include/hw/smbios/smbios.h b/include/hw/smbios/smbios.h index 1cd53cc58c..31e8d5f47e 100644 --- a/include/hw/smbios/smbios.h +++ b/include/hw/smbios/smbios.h @@ -257,7 +257,7 @@ struct smbios_type_127 { struct smbios_structure_header header; } QEMU_PACKED; -void smbios_entry_add(QemuOpts *opts); +void smbios_entry_add(QemuOpts *opts, Error **errp); void smbios_set_cpuid(uint32_t version, uint32_t features); void smbios_set_defaults(const char *manufacturer, const char *product, const char *version, bool legacy_mode, diff --git a/include/hw/sparc/sparc64.h b/include/hw/sparc/sparc64.h new file mode 100644 index 0000000000..7748939a97 --- /dev/null +++ b/include/hw/sparc/sparc64.h @@ -0,0 +1,5 @@ + +SPARCCPU *sparc64_cpu_devinit(const char *cpu_model, + const char *dflt_cpu_model, uint64_t prom_addr); + +void sparc64_cpu_set_ivec_irq(void *opaque, int irq, int level); diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h index bdfbcc0ffa..1f557313fa 100644 --- a/include/hw/ssi/aspeed_smc.h +++ b/include/hw/ssi/aspeed_smc.h @@ -44,10 +44,12 @@ typedef struct AspeedSMCController { const AspeedSegments *segments; hwaddr flash_window_base; uint32_t flash_window_size; + bool has_dma; + uint32_t nregs; } AspeedSMCController; typedef struct AspeedSMCFlash { - const struct AspeedSMCState *controller; + struct AspeedSMCState *controller; uint8_t id; uint32_t size; diff --git a/include/hw/timer/sun4v-rtc.h b/include/hw/timer/sun4v-rtc.h new file mode 100644 index 0000000000..407278f918 --- /dev/null +++ b/include/hw/timer/sun4v-rtc.h @@ -0,0 +1 @@ +void sun4v_rtc_init(hwaddr addr); diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index 6e90703cad..c3cf4a72bc 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -11,6 +11,8 @@ #ifndef VHOST_BACKEND_H #define VHOST_BACKEND_H +#include "exec/memory.h" + typedef enum VhostBackendType { VHOST_BACKEND_TYPE_NONE = 0, VHOST_BACKEND_TYPE_KERNEL = 1, @@ -32,6 +34,7 @@ typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev); typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev, struct vhost_vring_file *file); +typedef int (*vhost_net_set_mtu_op)(struct vhost_dev *dev, uint16_t mtu); typedef int (*vhost_scsi_set_endpoint_op)(struct vhost_dev *dev, struct vhost_scsi_target *target); typedef int (*vhost_scsi_clear_endpoint_op)(struct vhost_dev *dev, @@ -76,6 +79,14 @@ typedef bool (*vhost_backend_can_merge_op)(struct vhost_dev *dev, typedef int (*vhost_vsock_set_guest_cid_op)(struct vhost_dev *dev, uint64_t guest_cid); typedef int (*vhost_vsock_set_running_op)(struct vhost_dev *dev, int start); +typedef void (*vhost_set_iotlb_callback_op)(struct vhost_dev *dev, + int enabled); +typedef int (*vhost_update_device_iotlb_op)(struct vhost_dev *dev, + uint64_t iova, uint64_t uaddr, + uint64_t len, + IOMMUAccessFlags perm); +typedef int (*vhost_invalidate_device_iotlb_op)(struct vhost_dev *dev, + uint64_t iova, uint64_t len); typedef struct VhostOps { VhostBackendType backend_type; @@ -83,6 +94,7 @@ typedef struct VhostOps { vhost_backend_cleanup vhost_backend_cleanup; vhost_backend_memslots_limit vhost_backend_memslots_limit; vhost_net_set_backend_op vhost_net_set_backend; + vhost_net_set_mtu_op vhost_net_set_mtu; vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint; vhost_scsi_clear_endpoint_op vhost_scsi_clear_endpoint; vhost_scsi_get_abi_version_op vhost_scsi_get_abi_version; @@ -107,6 +119,9 
@@ typedef struct VhostOps { vhost_backend_can_merge_op vhost_backend_can_merge; vhost_vsock_set_guest_cid_op vhost_vsock_set_guest_cid; vhost_vsock_set_running_op vhost_vsock_set_running; + vhost_set_iotlb_callback_op vhost_set_iotlb_callback; + vhost_update_device_iotlb_op vhost_update_device_iotlb; + vhost_invalidate_device_iotlb_op vhost_invalidate_device_iotlb; } VhostOps; extern const VhostOps user_ops; diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 1fe5aadef5..52f633ec89 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -21,6 +21,7 @@ struct vhost_virtqueue { unsigned long long used_phys; unsigned used_size; EventNotifier masked_notifier; + struct vhost_dev *dev; }; typedef unsigned long vhost_log_chunk_t; @@ -38,6 +39,7 @@ struct vhost_log { struct vhost_memory; struct vhost_dev { + VirtIODevice *vdev; MemoryListener memory_listener; struct vhost_memory *mem; int n_mem_sections; @@ -62,6 +64,7 @@ struct vhost_dev { void *opaque; struct vhost_log *log; QLIST_ENTRY(vhost_dev) entry; + IOMMUNotifier n; }; int vhost_dev_init(struct vhost_dev *hdev, void *opaque, @@ -91,4 +94,5 @@ bool vhost_has_free_slot(void); int vhost_net_set_backend(struct vhost_dev *hdev, struct vhost_vring_file *file); +void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write); #endif diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h index 440b4555ea..91ae14d254 100644 --- a/include/hw/virtio/virtio-access.h +++ b/include/hw/virtio/virtio-access.h @@ -17,6 +17,7 @@ #define QEMU_VIRTIO_ACCESS_H #include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" #include "exec/address-spaces.h" #if defined(TARGET_PPC64) || defined(TARGET_ARM) @@ -40,45 +41,55 @@ static inline bool virtio_access_is_big_endian(VirtIODevice *vdev) static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa) { + AddressSpace *dma_as = vdev->dma_as; + if (virtio_access_is_big_endian(vdev)) { - return lduw_be_phys(&address_space_memory, pa); + return lduw_be_phys(dma_as, pa); } - return lduw_le_phys(&address_space_memory, pa); + return lduw_le_phys(dma_as, pa); } static inline uint32_t virtio_ldl_phys(VirtIODevice *vdev, hwaddr pa) { + AddressSpace *dma_as = vdev->dma_as; + if (virtio_access_is_big_endian(vdev)) { - return ldl_be_phys(&address_space_memory, pa); + return ldl_be_phys(dma_as, pa); } - return ldl_le_phys(&address_space_memory, pa); + return ldl_le_phys(dma_as, pa); } static inline uint64_t virtio_ldq_phys(VirtIODevice *vdev, hwaddr pa) { + AddressSpace *dma_as = vdev->dma_as; + if (virtio_access_is_big_endian(vdev)) { - return ldq_be_phys(&address_space_memory, pa); + return ldq_be_phys(dma_as, pa); } - return ldq_le_phys(&address_space_memory, pa); + return ldq_le_phys(dma_as, pa); } static inline void virtio_stw_phys(VirtIODevice *vdev, hwaddr pa, uint16_t value) { + AddressSpace *dma_as = vdev->dma_as; + if (virtio_access_is_big_endian(vdev)) { - stw_be_phys(&address_space_memory, pa, value); + stw_be_phys(dma_as, pa, value); } else { - stw_le_phys(&address_space_memory, pa, value); + stw_le_phys(dma_as, pa, value); } } static inline void virtio_stl_phys(VirtIODevice *vdev, hwaddr pa, uint32_t value) { + AddressSpace *dma_as = vdev->dma_as; + if (virtio_access_is_big_endian(vdev)) { - stl_be_phys(&address_space_memory, pa, value); + stl_be_phys(dma_as, pa, value); } else { - stl_le_phys(&address_space_memory, pa, value); + stl_le_phys(dma_as, pa, value); } } diff --git a/include/hw/virtio/virtio-bus.h 
b/include/hw/virtio/virtio-bus.h index 8a51e2c564..a63c1d216d 100644 --- a/include/hw/virtio/virtio-bus.h +++ b/include/hw/virtio/virtio-bus.h @@ -88,6 +88,7 @@ typedef struct VirtioBusClass { * Note that changing this will break migration for this transport. */ bool has_variable_vring_alignment; + AddressSpace *(*get_dma_as)(DeviceState *d); } VirtioBusClass; struct VirtioBusState { diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 20d1cd683a..f3a98a3261 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -38,6 +38,7 @@ struct virtio_gpu_simple_resource { unsigned int iov_cnt; uint32_t scanout_bitmask; pixman_image_t *image; + uint64_t hostmem; QTAILQ_ENTRY(virtio_gpu_simple_resource) next; }; @@ -68,6 +69,7 @@ enum virtio_gpu_conf_flags { (_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED)) struct virtio_gpu_conf { + uint64_t max_hostmem; uint32_t max_outputs; uint32_t flags; }; @@ -103,6 +105,7 @@ typedef struct VirtIOGPU { struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS]; struct virtio_gpu_conf conf; + uint64_t hostmem; int enabled_output_bitmask; struct virtio_gpu_config virtio_config; diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index 0ced975c57..8ea56a8f60 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -36,6 +36,7 @@ typedef struct virtio_net_conf int32_t txburst; char *tx; uint16_t rx_queue_size; + uint16_t mtu; } virtio_net_conf; /* Maximum packet size we can receive from tap device: header + 64k */ diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index ab0e030cc4..6523bacd2f 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -92,6 +92,7 @@ struct VirtIODevice char *bus_name; uint8_t device_endian; bool use_guest_notifier_mask; + AddressSpace *dma_as; QLIST_HEAD(, VirtQueue) *vector_queues; }; @@ -170,9 +171,10 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num); void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len, unsigned int idx); -void virtqueue_map(VirtQueueElement *elem); +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem); void *virtqueue_pop(VirtQueue *vq, size_t sz); -void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz); +unsigned int virtqueue_drop_all(VirtQueue *vq); +void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz); void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem); int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, unsigned int out_bytes); @@ -226,6 +228,7 @@ void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr); hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n); void virtio_queue_set_num(VirtIODevice *vdev, int n, int num); int virtio_queue_get_num(VirtIODevice *vdev, int n); +int virtio_queue_get_max_num(VirtIODevice *vdev, int n); int virtio_get_num_queues(VirtIODevice *vdev); void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, hwaddr avail, hwaddr used); @@ -255,7 +258,9 @@ typedef struct VirtIORNGConf VirtIORNGConf; DEFINE_PROP_BIT64("notify_on_empty", _state, _field, \ VIRTIO_F_NOTIFY_ON_EMPTY, true), \ DEFINE_PROP_BIT64("any_layout", _state, _field, \ - VIRTIO_F_ANY_LAYOUT, true) + VIRTIO_F_ANY_LAYOUT, true), \ + DEFINE_PROP_BIT64("iommu_platform", _state, _field, \ + VIRTIO_F_IOMMU_PLATFORM, false) hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n); hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n); 
@@ -266,6 +271,7 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n); uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n); void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx); void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n); +void virtio_queue_update_used_idx(VirtIODevice *vdev, int n); VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n); uint16_t virtio_get_queue_index(VirtQueue *vq); EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq); diff --git a/include/io/dns-resolver.h b/include/io/dns-resolver.h new file mode 100644 index 0000000000..2f69c08c13 --- /dev/null +++ b/include/io/dns-resolver.h @@ -0,0 +1,228 @@ +/* + * QEMU DNS resolver + * + * Copyright (c) 2016-2017 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#ifndef QIO_DNS_RESOLVER_H +#define QIO_DNS_RESOLVER_H + +#include "qemu-common.h" +#include "qom/object.h" +#include "io/task.h" + +#define TYPE_QIO_DNS_RESOLVER "qio-dns-resolver" +#define QIO_DNS_RESOLVER(obj) \ + OBJECT_CHECK(QIODNSResolver, (obj), TYPE_QIO_DNS_RESOLVER) +#define QIO_DNS_RESOLVER_CLASS(klass) \ + OBJECT_CLASS_CHECK(QIODNSResolverClass, klass, TYPE_QIO_DNS_RESOLVER) +#define QIO_DNS_RESOLVER_GET_CLASS(obj) \ + OBJECT_GET_CLASS(QIODNSResolverClass, obj, TYPE_QIO_DNS_RESOLVER) + +typedef struct QIODNSResolver QIODNSResolver; +typedef struct QIODNSResolverClass QIODNSResolverClass; + +/** + * QIODNSResolver: + * + * The QIODNSResolver class provides a framework for doing + * DNS resolution on SocketAddress objects, independently + * of socket creation. 
+ * + * <example> + * <title>Resolving addresses synchronously</title> + * <programlisting> + * int mylisten(SocketAddress *addr, Error **errp) { + * QIODNSResolver *resolver = qio_dns_resolver_get_instance(); + * SocketAddress **rawaddrs = NULL; + * size_t nrawaddrs = 0; + * Error *err = NULL; + * QIOChannel **socks = NULL; + * size_t nsocks = 0; + * + * if (qio_dns_resolver_lookup_sync(dns, addr, &nrawaddrs, + * &rawaddrs, errp) < 0) { + * return -1; + * } + * + * for (i = 0; i < nrawaddrs; i++) { + * QIOChannel *sock = qio_channel_new(); + * Error *local_err = NULL; + * qio_channel_listen_sync(sock, rawaddrs[i], &local_err); + * if (local_err) { + * error_propagate(&err, local_err); + * } else { + * socks = g_renew(QIOChannelSocket *, socks, nsocks + 1); + * socks[nsocks++] = sock; + * } + * qapi_free_SocketAddress(rawaddrs[i]); + * } + * g_free(rawaddrs); + * + * if (nsocks == 0) { + * error_propagate(errp, err); + * } else { + * error_free(err); + * } + * } + * </programlisting> + * </example> + * + * <example> + * <title>Resolving addresses asynchronously</title> + * <programlisting> + * typedef struct MyListenData { + * Error *err; + * QIOChannelSocket **socks; + * size_t nsocks; + * } MyListenData; + * + * void mylistenresult(QIOTask *task, void *opaque) { + * MyListenData *data = opaque; + * QIODNSResolver *resolver = + * QIO_DNS_RESOLVER(qio_task_get_source(task); + * SocketAddress **rawaddrs = NULL; + * size_t nrawaddrs = 0; + * Error *err = NULL; + * + * if (qio_task_propagate_error(task, &data->err)) { + * return; + * } + * + * qio_dns_resolver_lookup_result(resolver, task, + * &nrawaddrs, &rawaddrs); + * + * for (i = 0; i < nrawaddrs; i++) { + * QIOChannel *sock = qio_channel_new(); + * Error *local_err = NULL; + * qio_channel_listen_sync(sock, rawaddrs[i], &local_err); + * if (local_err) { + * error_propagate(&err, local_err); + * } else { + * socks = g_renew(QIOChannelSocket *, socks, nsocks + 1); + * socks[nsocks++] = sock; + * } + * qapi_free_SocketAddress(rawaddrs[i]); + * } + * g_free(rawaddrs); + * + * if (nsocks == 0) { + * error_propagate(&data->err, err); + * } else { + * error_free(err); + * } + * } + * + * void mylisten(SocketAddress *addr, MyListenData *data) { + * QIODNSResolver *resolver = qio_dns_resolver_get_instance(); + * qio_dns_resolver_lookup_async(dns, addr, + * mylistenresult, data, NULL); + * } + * </programlisting> + * </example> + */ +struct QIODNSResolver { + Object parent; +}; + +struct QIODNSResolverClass { + ObjectClass parent; +}; + + +/** + * qio_dns_resolver_get_instance: + * + * Get the singleton dns resolver instance. The caller + * does not own a reference on the returned object. + * + * Returns: the single dns resolver instance + */ +QIODNSResolver *qio_dns_resolver_get_instance(void); + +/** + * qio_dns_resolver_lookup_sync: + * @resolver: the DNS resolver instance + * @addr: the address to resolve + * @naddr: pointer to hold number of resolved addresses + * @addrs: pointer to hold resolved addresses + * @errp: pointer to NULL initialized error object + * + * This will attempt to resolve the address provided + * in @addr. If resolution succeeds, @addrs will be filled + * with all the resolved addresses. @naddrs will specify + * the number of entries allocated in @addrs. The caller + * is responsible for freeing each entry in @addrs, as + * well as @addrs itself. @naddrs is guaranteed to be + * greater than zero on success. 
+ * + * DNS resolution will be done synchronously so execution + * of the caller may be blocked for an arbitrary length + * of time. + * + * Returns: 0 if resolution was successful, -1 on error + */ +int qio_dns_resolver_lookup_sync(QIODNSResolver *resolver, + SocketAddress *addr, + size_t *naddrs, + SocketAddress ***addrs, + Error **errp); + +/** + * qio_dns_resolver_lookup_async: + * @resolver: the DNS resolver instance + * @addr: the address to resolve + * @func: the callback to invoke on lookup completion + * @opaque: data blob to pass to @func + * @notify: the callback to free @opaque, or NULL + * + * This will attempt to resolve the address provided + * in @addr. The callback @func will be invoked when + * resolution has either completed or failed. On + * success, the @func should call the method + * qio_dns_resolver_lookup_result() to obtain the + * results. + * + * DNS resolution will be done asynchronously so execution + * of the caller will not be blocked. + */ +void qio_dns_resolver_lookup_async(QIODNSResolver *resolver, + SocketAddress *addr, + QIOTaskFunc func, + gpointer opaque, + GDestroyNotify notify); + +/** + * qio_dns_resolver_lookup_result: + * @resolver: the DNS resolver instance + * @task: the task object to get results for + * @naddr: pointer to hold number of resolved addresses + * @addrs: pointer to hold resolved addresses + * + * This method should be called from the callback passed + * to qio_dns_resolver_lookup_async() in order to obtain + * results. @addrs will be filled with all the resolved + * addresses. @naddrs will specify the number of entries + * allocated in @addrs. The caller is responsible for + * freeing each entry in @addrs, as well as @addrs itself. + */ +void qio_dns_resolver_lookup_result(QIODNSResolver *resolver, + QIOTask *task, + size_t *naddrs, + SocketAddress ***addrs); + +#endif /* QIO_DNS_RESOLVER_H */ diff --git a/include/io/task.h b/include/io/task.h index 42028cb424..6021f51336 100644 --- a/include/io/task.h +++ b/include/io/task.h @@ -26,13 +26,11 @@ typedef struct QIOTask QIOTask; -typedef void (*QIOTaskFunc)(Object *source, - Error *err, +typedef void (*QIOTaskFunc)(QIOTask *task, gpointer opaque); -typedef int (*QIOTaskWorker)(QIOTask *task, - Error **errp, - gpointer opaque); +typedef void (*QIOTaskWorker)(QIOTask *task, + gpointer opaque); /** * QIOTask: @@ -44,12 +42,12 @@ typedef int (*QIOTaskWorker)(QIOTask *task, * a public API which accepts a task callback: * * <example> - * <title>Task callback function signature</title> + * <title>Task function signature</title> * <programlisting> * void myobject_operation(QMyObject *obj, * QIOTaskFunc *func, * gpointer opaque, - * GDestroyNotify *notify); + * GDestroyNotify notify); * </programlisting> * </example> * @@ -57,17 +55,41 @@ typedef int (*QIOTaskWorker)(QIOTask *task, * is data to pass to it. The optional 'notify' function is used * to free 'opaque' when no longer needed. * - * Now, lets say the implementation of this method wants to set - * a timer to run once a second checking for completion of some - * activity. It would do something like + * When the operation completes, the 'func' callback will be + * invoked, allowing the calling code to determine the result + * of the operation. 
An example QIOTaskFunc implementation may + * look like * * <example> - * <title>Task callback function implementation</title> + * <title>Task callback implementation</title> + * <programlisting> + * static void myobject_operation_notify(QIOTask *task, + * gpointer opaque) + * { + * Error *err = NULL; + * if (qio_task_propagate_error(task, &err)) { + * ...deal with the failure... + * error_free(err); + * } else { + * QMyObject *src = QMY_OBJECT(qio_task_get_source(task)); + * ...deal with the completion... + * } + * } + * </programlisting> + * </example> + * + * Now, lets say the implementation of the method using the + * task wants to set a timer to run once a second checking + * for completion of some activity. It would do something + * like + * + * <example> + * <title>Task function implementation</title> * <programlisting> * void myobject_operation(QMyObject *obj, * QIOTaskFunc *func, * gpointer opaque, - * GDestroyNotify *notify) + * GDestroyNotify notify) * { * QIOTask *task; * @@ -102,8 +124,8 @@ typedef int (*QIOTaskWorker)(QIOTask *task, * * ...check something important... * if (err) { - * qio_task_abort(task, err); - * error_free(task); + * qio_task_set_error(task, err); + * qio_task_complete(task); * return FALSE; * } else if (...work is completed ...) { * qio_task_complete(task); @@ -115,6 +137,10 @@ typedef int (*QIOTaskWorker)(QIOTask *task, * </programlisting> * </example> * + * The 'qio_task_complete' call in this method will trigger + * the callback func 'myobject_operation_notify' shown + * earlier to deal with the results. + * * Once this function returns false, object_unref will be called * automatically on the task causing it to be released and the * ref on QMyObject dropped too. @@ -136,25 +162,23 @@ typedef int (*QIOTaskWorker)(QIOTask *task, * socket listen using QIOTask would require: * * <example> - * static int myobject_listen_worker(QIOTask *task, - * Error **errp, - * gpointer opaque) + * static void myobject_listen_worker(QIOTask *task, + * gpointer opaque) * { * QMyObject obj = QMY_OBJECT(qio_task_get_source(task)); * SocketAddress *addr = opaque; + * Error *err = NULL; * - * obj->fd = socket_listen(addr, errp); - * if (obj->fd < 0) { - * return -1; - * } - * return 0; + * obj->fd = socket_listen(addr, &err); + * + qio_task_set_error(task, err); * } * * void myobject_listen_async(QMyObject *obj, * SocketAddress *addr, * QIOTaskFunc *func, * gpointer opaque, - * GDestroyNotify *notify) + * GDestroyNotify notify) * { * QIOTask *task; * SocketAddress *addrCopy; @@ -187,8 +211,8 @@ typedef int (*QIOTaskWorker)(QIOTask *task, * 'err' attribute in the task object to determine if * the operation was successful or not. * - * The returned task will be released when one of - * qio_task_abort() or qio_task_complete() are invoked. + * The returned task will be released when qio_task_complete() + * is invoked. * * Returns: the task struct */ @@ -204,10 +228,8 @@ QIOTask *qio_task_new(Object *source, * @opaque: opaque data to pass to @worker * @destroy: function to free @opaque * - * Run a task in a background thread. If @worker - * returns 0 it will call qio_task_complete() in - * the main event thread context. If @worker - * returns -1 it will call qio_task_abort() in + * Run a task in a background thread. When @worker + * returns it will call qio_task_complete() in * the main event thread context. 
*/ void qio_task_run_in_thread(QIOTask *task, @@ -219,24 +241,69 @@ void qio_task_run_in_thread(QIOTask *task, * qio_task_complete: * @task: the task struct * - * Mark the operation as successfully completed - * and free the memory for @task. + * Invoke the completion callback for @task and + * then free its memory. */ void qio_task_complete(QIOTask *task); + +/** + * qio_task_set_error: + * @task: the task struct + * @err: pointer to the error, or NULL + * + * Associate an error with the task, which can later + * be retrieved with the qio_task_propagate_error() + * method. This method takes ownership of @err, so + * it is not valid to access it after this call + * completes. If @err is NULL this is a no-op. If + * this is call multiple times, only the first + * provided @err will be recorded, later ones will + * be discarded and freed. + */ +void qio_task_set_error(QIOTask *task, + Error *err); + + /** - * qio_task_abort: + * qio_task_propagate_error: * @task: the task struct - * @err: the error to record for the operation + * @errp: pointer to a NULL-initialized error object + * + * Propagate the error associated with @task + * into @errp. + * + * Returns: true if an error was propagated, false otherwise + */ +bool qio_task_propagate_error(QIOTask *task, + Error **errp); + + +/** + * qio_task_set_result_pointer: + * @task: the task struct + * @result: pointer to the result data + * + * Associate an opaque result with the task, + * which can later be retrieved with the + * qio_task_get_result_pointer() method + * + */ +void qio_task_set_result_pointer(QIOTask *task, + gpointer result, + GDestroyNotify notify); + + +/** + * qio_task_get_result_pointer: + * @task: the task struct + * + * Retrieve the opaque result data associated + * with the task, if any. * - * Mark the operation as failed, with @err providing - * details about the failure. The @err may be freed - * afer the function returns, as the notification - * callback is invoked synchronously. The @task will - * be freed when this call completes. + * Returns: the task result, or NULL */ -void qio_task_abort(QIOTask *task, - Error *err); +gpointer qio_task_get_result_pointer(QIOTask *task); /** @@ -244,9 +311,10 @@ void qio_task_abort(QIOTask *task, * @task: the task struct * * Get the source object associated with the background - * task. This returns a new reference to the object, - * which the caller must released with object_unref() - * when no longer required. + * task. The caller does not own a reference on the + * returned Object, and so should call object_ref() + * if it wants to keep the object pointer outside the + * lifetime of the QIOTask object. 
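The result-pointer helpers documented above have no worked example in the header comments, so the following sketch shows one plausible pairing of qio_task_set_result_pointer() in a worker with qio_task_propagate_error() and qio_task_get_result_pointer() in the completion callback. The myconnect_* names are invented for illustration, and the QIOChannelSocket calls are used only as a convenient result object; this is a sketch of the intended flow, not code from the patch.

    #include "qemu/osdep.h"
    #include "io/task.h"
    #include "io/channel-socket.h"
    #include "qapi/error.h"

    static void myconnect_worker(QIOTask *task, gpointer opaque)
    {
        SocketAddress *addr = opaque;
        QIOChannelSocket *sioc = qio_channel_socket_new();
        Error *err = NULL;

        if (qio_channel_socket_connect_sync(sioc, addr, &err) < 0) {
            object_unref(OBJECT(sioc));
            qio_task_set_error(task, err);
            return;
        }
        /* Handed to the completion callback; the destroy notifier frees
         * it if the callback never claims the result. */
        qio_task_set_result_pointer(task, sioc, (GDestroyNotify)object_unref);
    }

    static void myconnect_done(QIOTask *task, gpointer opaque)
    {
        Error *err = NULL;
        QIOChannelSocket *sioc;

        if (qio_task_propagate_error(task, &err)) {
            /* ...report the failure... */
            error_free(err);
            return;
        }
        sioc = qio_task_get_result_pointer(task);
        /* ...object_ref(OBJECT(sioc)) and start using the channel... */
    }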
* * Returns: the source object */ diff --git a/include/migration/migration.h b/include/migration/migration.h index c309d23370..af9135f0a7 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -38,6 +38,9 @@ #define QEMU_VM_COMMAND 0x08 #define QEMU_VM_SECTION_FOOTER 0x7e +/* for vl.c */ +extern int only_migratable; + struct MigrationParams { bool blk; bool shared; @@ -177,6 +180,9 @@ struct MigrationState /* Flag set once the migration thread is running (and needs joining) */ bool migration_thread_running; + /* Flag set once the migration thread called bdrv_inactivate_all */ + bool block_inactive; + /* Queue of outstanding page requests from the destination */ QemuMutex src_page_req_mutex; QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests; @@ -240,6 +246,7 @@ void remove_migration_state_change_notifier(Notifier *notify); MigrationState *migrate_init(const MigrationParams *params); bool migration_is_blocked(Error **errp); bool migration_in_setup(MigrationState *); +bool migration_is_idle(MigrationState *s); bool migration_has_finished(MigrationState *); bool migration_has_failed(MigrationState *); /* True if outgoing migration has entered postcopy phase */ @@ -284,8 +291,12 @@ int ram_postcopy_incoming_init(MigrationIncomingState *mis); * @migrate_add_blocker - prevent migration from proceeding * * @reason - an error to be returned whenever migration is attempted + * + * @errp - [out] The reason (if any) we cannot block migration right now. + * + * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set. */ -void migrate_add_blocker(Error *reason); +int migrate_add_blocker(Error *reason, Error **errp); /** * @migrate_del_blocker - remove a blocking error from migration diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 1638ee57f7..3bbe3ed984 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -81,11 +81,20 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque); typedef struct VMStateInfo VMStateInfo; typedef struct VMStateDescription VMStateDescription; - +typedef struct VMStateField VMStateField; + +/* VMStateInfo allows customized migration of objects that don't fit in + * any category in VMStateFlags. Additional information is always passed + * into get and put in terms of field and vmdesc parameters. However + * these two parameters should only be used in cases when customized + * handling is needed, such as QTAILQ. For primitive data types such as + * integer, field and vmdesc parameters should be ignored inside get/put. 
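Because migrate_add_blocker() now reports failure through its return value and @errp, callers must be prepared for the blocker being refused, for example when a migration is already in flight. A rough sketch of the resulting calling convention follows; the mydev_* names are invented for the example and the reason Error stays owned by the caller, which must free it once the blocker is rejected or deleted.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "migration/migration.h"

    static Error *mydev_mig_blocker;

    static int mydev_start(Error **errp)
    {
        Error *local_err = NULL;
        int ret;

        error_setg(&mydev_mig_blocker,
                   "Migration is not supported while mydev is active");
        ret = migrate_add_blocker(mydev_mig_blocker, &local_err);
        if (ret < 0) {
            /* The migration core refused the blocker: report why and
             * drop the unused reason. */
            error_propagate(errp, local_err);
            error_free(mydev_mig_blocker);
            mydev_mig_blocker = NULL;
            return ret;
        }
        return 0;
    }

    static void mydev_stop(void)
    {
        if (mydev_mig_blocker) {
            migrate_del_blocker(mydev_mig_blocker);
            error_free(mydev_mig_blocker);
            mydev_mig_blocker = NULL;
        }
    }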
+ */ struct VMStateInfo { const char *name; - int (*get)(QEMUFile *f, void *pv, size_t size); - void (*put)(QEMUFile *f, void *pv, size_t size); + int (*get)(QEMUFile *f, void *pv, size_t size, VMStateField *field); + int (*put)(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc); }; enum VMStateFlags { @@ -186,7 +195,13 @@ enum VMStateFlags { VMS_MULTIPLY_ELEMENTS = 0x4000, }; -typedef struct { +typedef enum { + MIG_PRI_DEFAULT = 0, + MIG_PRI_IOMMU, /* Must happen before PCI devices */ + MIG_PRI_MAX, +} MigrationPriority; + +struct VMStateField { const char *name; size_t offset; size_t size; @@ -199,7 +214,7 @@ typedef struct { const VMStateDescription *vmsd; int version_id; bool (*field_exists)(void *opaque, int version_id); -} VMStateField; +}; struct VMStateDescription { const char *name; @@ -207,6 +222,7 @@ struct VMStateDescription { int version_id; int minimum_version_id; int minimum_version_id_old; + MigrationPriority priority; LoadStateHandler *load_state_old; int (*pre_load)(void *opaque); int (*post_load)(void *opaque, int version_id); @@ -244,6 +260,7 @@ extern const VMStateInfo vmstate_info_timer; extern const VMStateInfo vmstate_info_buffer; extern const VMStateInfo vmstate_info_unused_buffer; extern const VMStateInfo vmstate_info_bitmap; +extern const VMStateInfo vmstate_info_qtailq; #define type_check_2darray(t1,t2,n,m) ((t1(*)[n][m])0 - (t2*)0) #define type_check_array(t1,t2,n) ((t1(*)[n])0 - (t2*)0) @@ -655,6 +672,25 @@ extern const VMStateInfo vmstate_info_bitmap; .offset = offsetof(_state, _field), \ } +/* For migrating a QTAILQ. + * Target QTAILQ needs be properly initialized. + * _type: type of QTAILQ element + * _next: name of QTAILQ entry field in QTAILQ element + * _vmsd: VMSD for QTAILQ element + * size: size of QTAILQ element + * start: offset of QTAILQ entry in QTAILQ element + */ +#define VMSTATE_QTAILQ_V(_field, _state, _version, _vmsd, _type, _next) \ +{ \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .info = &vmstate_info_qtailq, \ + .offset = offsetof(_state, _field), \ + .start = offsetof(_type, _next), \ +} + /* _f : field name _f_n : num of elements field_name _n : num of elements diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h index 5a08efffef..afc1499eb9 100644 --- a/include/net/vhost_net.h +++ b/include/net/vhost_net.h @@ -35,4 +35,6 @@ int vhost_set_vring_enable(NetClientState * nc, int enable); uint64_t vhost_net_get_acked_features(VHostNetState *net); +int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu); + #endif diff --git a/include/qapi/dealloc-visitor.h b/include/qapi/dealloc-visitor.h index b3e5c85fd8..c36715fdf3 100644 --- a/include/qapi/dealloc-visitor.h +++ b/include/qapi/dealloc-visitor.h @@ -19,7 +19,7 @@ typedef struct QapiDeallocVisitor QapiDeallocVisitor; /* - * The dealloc visitor is primarly used only by generated + * The dealloc visitor is primarily used only by generated * qapi_free_FOO() functions, and is the only visitor designed to work * correctly in the face of a partially-constructed QAPI tree. */ diff --git a/include/qapi/error.h b/include/qapi/error.h index 0576659603..7e532d00e9 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -170,6 +170,9 @@ void error_setg_internal(Error **errp, * Just like error_setg(), with @os_error info added to the message. * If @os_error is non-zero, ": " + strerror(os_error) is appended to * the human-readable error message. 
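To show how the VMSTATE_QTAILQ_V() macro above is meant to be used, here is a sketch of a hypothetical device that migrates a queue of request records. MyRequest, MyDeviceState and their VMSDs exist only for this example; as the macro comment notes, the destination's QTAILQ must already be initialized before the incoming stream is loaded.

    #include "qemu/osdep.h"
    #include "qemu/queue.h"
    #include "migration/vmstate.h"

    typedef struct MyRequest {
        uint32_t tag;
        QTAILQ_ENTRY(MyRequest) next;
    } MyRequest;

    typedef struct MyDeviceState {
        QTAILQ_HEAD(, MyRequest) requests;
    } MyDeviceState;

    static const VMStateDescription vmstate_my_request = {
        .name = "my-request",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(tag, MyRequest),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_my_device = {
        .name = "my-device",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            /* Each element is saved/loaded through vmstate_my_request;
             * 'next' tells the qtailq code where the link field lives. */
            VMSTATE_QTAILQ_V(requests, MyDeviceState, 1,
                             vmstate_my_request, MyRequest, next),
            VMSTATE_END_OF_LIST()
        }
    };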
+ * + * The value of errno (which usually can get clobbered by almost any + * function call) will be preserved. */ #define error_setg_errno(errp, os_error, fmt, ...) \ error_setg_errno_internal((errp), __FILE__, __LINE__, __func__, \ diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h index 8d4b2b6d94..c80d5c8a33 100644 --- a/include/qemu/config-file.h +++ b/include/qemu/config-file.h @@ -23,8 +23,4 @@ int qemu_read_config_file(const char *filename); void qemu_config_parse_qdict(QDict *options, QemuOptsList **lists, Error **errp); -/* Read default QEMU config files - */ -int qemu_read_default_config_files(bool userconfig); - #endif /* QEMU_CONFIG_FILE_H */ diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h index e6a60d55fd..12584ed1b7 100644 --- a/include/qemu/coroutine.h +++ b/include/qemu/coroutine.h @@ -71,6 +71,12 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque); void qemu_coroutine_enter(Coroutine *coroutine); /** + * Transfer control to a coroutine if it's not active (i.e. part of the call + * stack of the running coroutine). Otherwise, do nothing. + */ +void qemu_coroutine_enter_if_inactive(Coroutine *co); + +/** * Transfer control back to a coroutine's caller * * This function does not return until the coroutine is re-entered using diff --git a/include/qemu/event_notifier.h b/include/qemu/event_notifier.h index e326990db4..599c99f1a5 100644 --- a/include/qemu/event_notifier.h +++ b/include/qemu/event_notifier.h @@ -34,9 +34,6 @@ int event_notifier_init(EventNotifier *, int active); void event_notifier_cleanup(EventNotifier *); int event_notifier_set(EventNotifier *); int event_notifier_test_and_clear(EventNotifier *); -int event_notifier_set_handler(EventNotifier *, - bool is_external, - EventNotifierHandler *); #ifdef CONFIG_POSIX void event_notifier_init_fd(EventNotifier *, int fd); diff --git a/include/qemu/futex.h b/include/qemu/futex.h new file mode 100644 index 0000000000..bb7dc9e296 --- /dev/null +++ b/include/qemu/futex.h @@ -0,0 +1,36 @@ +/* + * Wrappers around Linux futex syscall + * + * Copyright Red Hat, Inc. 2017 + * + * Author: + * Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include <sys/syscall.h> +#include <linux/futex.h> + +#define qemu_futex(...) 
syscall(__NR_futex, __VA_ARGS__) + +static inline void qemu_futex_wake(void *f, int n) +{ + qemu_futex(f, FUTEX_WAKE, n, NULL, NULL, 0); +} + +static inline void qemu_futex_wait(void *f, unsigned val) +{ + while (qemu_futex(f, FUTEX_WAIT, (int) val, NULL, NULL, 0)) { + switch (errno) { + case EWOULDBLOCK: + return; + case EINTR: + break; /* get out of switch and retry */ + default: + abort(); + } + } +} diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index 46187bbc7e..96288d0bce 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -327,7 +327,7 @@ static inline int ctpop8(uint8_t val) #else val = (val & 0x55) + ((val >> 1) & 0x55); val = (val & 0x33) + ((val >> 2) & 0x33); - val = (val & 0x0f) + ((val >> 4) & 0x0f); + val = (val + (val >> 4)) & 0x0f; return val; #endif @@ -344,8 +344,8 @@ static inline int ctpop16(uint16_t val) #else val = (val & 0x5555) + ((val >> 1) & 0x5555); val = (val & 0x3333) + ((val >> 2) & 0x3333); - val = (val & 0x0f0f) + ((val >> 4) & 0x0f0f); - val = (val & 0x00ff) + ((val >> 8) & 0x00ff); + val = (val + (val >> 4)) & 0x0f0f; + val = (val + (val >> 8)) & 0x00ff; return val; #endif @@ -360,11 +360,10 @@ static inline int ctpop32(uint32_t val) #if QEMU_GNUC_PREREQ(3, 4) return __builtin_popcount(val); #else - val = (val & 0x55555555) + ((val >> 1) & 0x55555555); - val = (val & 0x33333333) + ((val >> 2) & 0x33333333); - val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); - val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff); - val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff); + val = (val & 0x55555555) + ((val >> 1) & 0x55555555); + val = (val & 0x33333333) + ((val >> 2) & 0x33333333); + val = (val + (val >> 4)) & 0x0f0f0f0f; + val = (val * 0x01010101) >> 24; return val; #endif @@ -379,12 +378,10 @@ static inline int ctpop64(uint64_t val) #if QEMU_GNUC_PREREQ(3, 4) return __builtin_popcountll(val); #else - val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); - val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); - val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL); - val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & 0x00ff00ff00ff00ffULL); - val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & 0x0000ffff0000ffffULL); - val = (val & 0x00000000ffffffffULL) + ((val >> 32) & 0x00000000ffffffffULL); + val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); + val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL; + val = (val * 0x0101010101010101ULL) >> 56; return val; #endif diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index a9d4f23cd9..d7e24af78d 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -203,6 +203,21 @@ void qemu_set_fd_handler(int fd, IOHandler *fd_write, void *opaque); + +/** + * event_notifier_set_handler: Register an EventNotifier with the main loop + * + * This function tells the main loop to wake up whenever the + * #EventNotifier was set. + * + * @e: The #EventNotifier to be observed. + * + * @handler: A level-triggered callback that is fired when @e + * has been set. @e is passed to it as a parameter. 
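The event_notifier_set_handler() declaration that follows replaces the variant removed from qemu/event_notifier.h above. A minimal sketch of the intended usage, with my_notifier_* names invented for illustration:

    #include "qemu/osdep.h"
    #include "qemu/event_notifier.h"
    #include "qemu/main-loop.h"

    static EventNotifier my_notifier;

    /* Runs in the main loop whenever my_notifier has been set. */
    static void my_notifier_read(EventNotifier *e)
    {
        /* Level-triggered: clear the notifier, otherwise the handler
         * keeps being invoked. */
        if (event_notifier_test_and_clear(e)) {
            /* ...handle the wakeup in the main loop thread... */
        }
    }

    static void my_notifier_setup(void)
    {
        event_notifier_init(&my_notifier, 0);
        event_notifier_set_handler(&my_notifier, my_notifier_read);
    }

    /* Any thread can then wake the main loop with:
     *     event_notifier_set(&my_notifier);
     */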
+ */ +void event_notifier_set_handler(EventNotifier *e, + EventNotifierHandler *handler); + GSource *iohandler_get_g_source(void); AioContext *iohandler_get_aio_context(void); #ifdef CONFIG_POSIX diff --git a/include/qemu/qht.h b/include/qemu/qht.h index 311139b85a..56c2c7784c 100644 --- a/include/qemu/qht.h +++ b/include/qemu/qht.h @@ -72,7 +72,7 @@ void qht_destroy(struct qht *ht); * In case of successful operation, smp_wmb() is implied before the pointer is * inserted into the hash table. * - * Returns true on sucess. + * Returns true on success. * Returns false if the @p-@hash pair already exists in the hash table. */ bool qht_insert(struct qht *ht, void *p, uint32_t hash); diff --git a/include/qemu/queue.h b/include/qemu/queue.h index 342073fb4d..35292c3155 100644 --- a/include/qemu/queue.h +++ b/include/qemu/queue.h @@ -438,4 +438,64 @@ struct { \ #define QTAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) +#define field_at_offset(base, offset, type) \ + ((type) (((char *) (base)) + (offset))) + +typedef struct DUMMY_Q_ENTRY DUMMY_Q_ENTRY; +typedef struct DUMMY_Q DUMMY_Q; + +struct DUMMY_Q_ENTRY { + QTAILQ_ENTRY(DUMMY_Q_ENTRY) next; +}; + +struct DUMMY_Q { + QTAILQ_HEAD(DUMMY_Q_HEAD, DUMMY_Q_ENTRY) head; +}; + +#define dummy_q ((DUMMY_Q *) 0) +#define dummy_qe ((DUMMY_Q_ENTRY *) 0) + +/* + * Offsets of layout of a tail queue head. + */ +#define QTAILQ_FIRST_OFFSET (offsetof(typeof(dummy_q->head), tqh_first)) +#define QTAILQ_LAST_OFFSET (offsetof(typeof(dummy_q->head), tqh_last)) +/* + * Raw access of elements of a tail queue + */ +#define QTAILQ_RAW_FIRST(head) \ + (*field_at_offset(head, QTAILQ_FIRST_OFFSET, void **)) +#define QTAILQ_RAW_TQH_LAST(head) \ + (*field_at_offset(head, QTAILQ_LAST_OFFSET, void ***)) + +/* + * Offsets of layout of a tail queue element. + */ +#define QTAILQ_NEXT_OFFSET (offsetof(typeof(dummy_qe->next), tqe_next)) +#define QTAILQ_PREV_OFFSET (offsetof(typeof(dummy_qe->next), tqe_prev)) + +/* + * Raw access of elements of a tail entry + */ +#define QTAILQ_RAW_NEXT(elm, entry) \ + (*field_at_offset(elm, entry + QTAILQ_NEXT_OFFSET, void **)) +#define QTAILQ_RAW_TQE_PREV(elm, entry) \ + (*field_at_offset(elm, entry + QTAILQ_PREV_OFFSET, void ***)) +/* + * Tail queue tranversal using pointer arithmetic. + */ +#define QTAILQ_RAW_FOREACH(elm, head, entry) \ + for ((elm) = QTAILQ_RAW_FIRST(head); \ + (elm); \ + (elm) = QTAILQ_RAW_NEXT(elm, entry)) +/* + * Tail queue insertion using pointer arithmetic. 
+ */ +#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \ + QTAILQ_RAW_NEXT(elm, entry) = NULL; \ + QTAILQ_RAW_TQE_PREV(elm, entry) = QTAILQ_RAW_TQH_LAST(head); \ + *QTAILQ_RAW_TQH_LAST(head) = (elm); \ + QTAILQ_RAW_TQH_LAST(head) = &QTAILQ_RAW_NEXT(elm, entry); \ +} while (/*CONSTCOND*/0) + #endif /* QEMU_SYS_QUEUE_H */ diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h index 5589e6842b..5f1bab9b3e 100644 --- a/include/qemu/sockets.h +++ b/include/qemu/sockets.h @@ -32,6 +32,8 @@ int socket_set_fast_reuse(int fd); */ typedef void NonBlockingConnectHandler(int fd, Error *err, void *opaque); +int inet_ai_family_from_address(InetSocketAddress *addr, + Error **errp); InetSocketAddress *inet_parse(const char *str, Error **errp); int inet_connect(const char *str, Error **errp); int inet_connect_saddr(InetSocketAddress *saddr, Error **errp, diff --git a/include/qemu/thread.h b/include/qemu/thread.h index e8e665f020..9910f49b3a 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -8,6 +8,7 @@ typedef struct QemuMutex QemuMutex; typedef struct QemuCond QemuCond; typedef struct QemuSemaphore QemuSemaphore; typedef struct QemuEvent QemuEvent; +typedef struct QemuLockCnt QemuLockCnt; typedef struct QemuThread QemuThread; #ifdef _WIN32 @@ -98,4 +99,115 @@ static inline void qemu_spin_unlock(QemuSpin *spin) __sync_lock_release(&spin->value); } +struct QemuLockCnt { +#ifndef CONFIG_LINUX + QemuMutex mutex; +#endif + unsigned count; +}; + +/** + * qemu_lockcnt_init: initialize a QemuLockcnt + * @lockcnt: the lockcnt to initialize + * + * Initialize lockcnt's counter to zero and prepare its mutex + * for usage. + */ +void qemu_lockcnt_init(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_destroy: destroy a QemuLockcnt + * @lockcnt: the lockcnt to destruct + * + * Destroy lockcnt's mutex. + */ +void qemu_lockcnt_destroy(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_inc: increment a QemuLockCnt's counter + * @lockcnt: the lockcnt to operate on + * + * If the lockcnt's count is zero, wait for critical sections + * to finish and increment lockcnt's count to 1. If the count + * is not zero, just increment it. + * + * Because this function can wait on the mutex, it must not be + * called while the lockcnt's mutex is held by the current thread. + * For the same reason, qemu_lockcnt_inc can also contribute to + * AB-BA deadlocks. This is a sample deadlock scenario: + * + * thread 1 thread 2 + * ------------------------------------------------------- + * qemu_lockcnt_lock(&lc1); + * qemu_lockcnt_lock(&lc2); + * qemu_lockcnt_inc(&lc2); + * qemu_lockcnt_inc(&lc1); + */ +void qemu_lockcnt_inc(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec: decrement a QemuLockCnt's counter + * @lockcnt: the lockcnt to operate on + */ +void qemu_lockcnt_dec(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and + * possibly lock it. + * @lockcnt: the lockcnt to operate on + * + * Decrement lockcnt's count. If the new count is zero, lock + * the mutex and return true. Otherwise, return false. + */ +bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and + * lock it. + * @lockcnt: the lockcnt to operate on + * + * If the count is 1, decrement the count to zero, lock + * the mutex and return true. Otherwise, return false. + */ +bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_lock: lock a QemuLockCnt's mutex. 
+ * @lockcnt: the lockcnt to operate on + * + * Remember that concurrent visits are not blocked unless the count is + * also zero. You can use qemu_lockcnt_count to check for this inside a + * critical section. + */ +void qemu_lockcnt_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_unlock: release a QemuLockCnt's mutex. + * @lockcnt: the lockcnt to operate on. + */ +void qemu_lockcnt_unlock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt. + * @lockcnt: the lockcnt to operate on. + * + * This is the same as + * + * qemu_lockcnt_unlock(lockcnt); + * qemu_lockcnt_inc(lockcnt); + * + * but more efficient. + */ +void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_count: query a LockCnt's count. + * @lockcnt: the lockcnt to query. + * + * Note that the count can change at any time. Still, while the + * lockcnt is locked, one can usefully check whether the count + * is non-zero. + */ +unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt); + #endif diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h index 83cf98cbd8..a83fe8e749 100644 --- a/include/qemu/xattr.h +++ b/include/qemu/xattr.h @@ -14,7 +14,7 @@ #define QEMU_XATTR_H /* - * Modern distributions (e.g. Fedora 15, have no libattr.so, place attr.h + * Modern distributions (e.g. Fedora 15), have no libattr.so, place attr.h * in /usr/include/sys, and don't have ENOATTR. */ diff --git a/include/qom/cpu.h b/include/qom/cpu.h index 3f79a8e955..ca4d0fb1b4 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -227,6 +227,8 @@ struct CPUWatchpoint { struct KVMState; struct kvm_run; +struct hax_vcpu_state; + #define TB_JMP_CACHE_BITS 12 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) @@ -392,6 +394,9 @@ struct CPUState { (absolute value) offset as small as possible. This reduces code size, especially for hosts without large memory offsets. */ uint32_t tcg_exit_req; + + bool hax_vcpu_dirty; + struct hax_vcpu_state *hax_vcpu; }; QTAILQ_HEAD(CPUTailQ, CPUState); diff --git a/include/qom/object.h b/include/qom/object.h index 5ecc2d166d..cd0f412ce9 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -432,7 +432,7 @@ struct Object * @class_base_init: This function is called for all base classes after all * parent class initialization has occurred, but before the class itself * is initialized. This is the function to use to undo the effects of - * memcpy from the parent class to the descendents. + * memcpy from the parent class to the descendants. * @class_finalize: This function is called during class destruction and is * meant to release and dynamic parameters allocated by @class_init. * @class_data: Data to pass to the @class_init, @class_base_init and @@ -587,18 +587,6 @@ struct InterfaceClass Object *object_new(const char *typename); /** - * object_new_with_type: - * @type: The type of the object to instantiate. - * - * This function will initialize a new object using heap allocated memory. - * The returned object has a reference count of 1, and will be freed when - * the last reference is dropped. - * - * Returns: The newly allocated and instantiated object. - */ -Object *object_new_with_type(Type type); - -/** * object_new_with_props: * @typename: The name of the type of the object to instantiate. * @parent: the parent object @@ -727,18 +715,6 @@ int object_set_propv(Object *obj, va_list vargs); /** - * object_initialize_with_type: - * @data: A pointer to the memory to be used for the object. 
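Putting the QemuLockCnt operations above together, the intended pattern is that readers of a shared list only bump the counter while walking it, and the code that frees elements takes the lock and only unlinks them once the count has dropped to zero. The sketch below uses a hypothetical QLIST of Item nodes; it follows the documented contract rather than any particular caller in the tree.

    #include "qemu/osdep.h"
    #include "qemu/thread.h"
    #include "qemu/queue.h"

    typedef struct Item {
        QLIST_ENTRY(Item) node;
        bool deleted;
    } Item;

    static QemuLockCnt list_lock;
    static QLIST_HEAD(, Item) items;

    static void reader_walk(void)
    {
        Item *item;

        qemu_lockcnt_inc(&list_lock);          /* block concurrent frees */
        QLIST_FOREACH(item, &items, node) {
            if (!item->deleted) {
                /* ...visit item... */
            }
        }
        qemu_lockcnt_dec(&list_lock);
    }

    static void remove_item(Item *item)
    {
        qemu_lockcnt_lock(&list_lock);
        if (qemu_lockcnt_count(&list_lock) == 0) {
            /* No readers in flight: safe to unlink and free now. */
            QLIST_REMOVE(item, node);
            g_free(item);
        } else {
            /* Readers still walking: mark it and let a later pass with
             * the lock held (and count == 0) reap deleted entries. */
            item->deleted = true;
        }
        qemu_lockcnt_unlock(&list_lock);
    }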
- * @size: The maximum size available at @data for the object. - * @type: The type of the object to instantiate. - * - * This function will initialize an object. The memory for the object should - * have already been allocated. The returned object has a reference count of 1, - * and will be finalized when the last reference is dropped. - */ -void object_initialize_with_type(void *data, size_t size, Type type); - -/** * object_initialize: * @obj: A pointer to the memory to be used for the object. * @size: The maximum size available at @obj for the object. diff --git a/include/qom/object_interfaces.h b/include/qom/object_interfaces.h index 8b17f4def7..fdd7603c84 100644 --- a/include/qom/object_interfaces.h +++ b/include/qom/object_interfaces.h @@ -76,23 +76,6 @@ void user_creatable_complete(Object *obj, Error **errp); bool user_creatable_can_be_deleted(UserCreatable *uc, Error **errp); /** - * user_creatable_add: - * @qdict: the object definition - * @v: the visitor - * @errp: if an error occurs, a pointer to an area to store the error - * - * Create an instance of the user creatable object whose type - * is defined in @qdict by the 'qom-type' field, placing it - * in the object composition tree with name provided by the - * 'id' field. The remaining fields in @qdict are used to - * initialize the object properties. - * - * Returns: the newly created object or NULL on error - */ -Object *user_creatable_add(const QDict *qdict, - Visitor *v, Error **errp); - -/** * user_creatable_add_type: * @type: the object type name * @id: the unique ID for the object diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h index 82275a84d8..5ff0b4ee59 100644 --- a/include/standard-headers/linux/virtio_crypto.h +++ b/include/standard-headers/linux/virtio_crypto.h @@ -1,5 +1,5 @@ -#ifndef _LINUX_VIRTIO_CRYPTO_H -#define _LINUX_VIRTIO_CRYPTO_H +#ifndef _VIRTIO_CRYPTO_H +#define _VIRTIO_CRYPTO_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * @@ -14,52 +14,54 @@ * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. */ - + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL IBM OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ #include "standard-headers/linux/types.h" -#include "standard-headers/linux/virtio_config.h" #include "standard-headers/linux/virtio_types.h" +#include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/virtio_config.h" #define VIRTIO_CRYPTO_SERVICE_CIPHER 0 -#define VIRTIO_CRYPTO_SERVICE_HASH 1 -#define VIRTIO_CRYPTO_SERVICE_MAC 2 -#define VIRTIO_CRYPTO_SERVICE_AEAD 3 +#define VIRTIO_CRYPTO_SERVICE_HASH 1 +#define VIRTIO_CRYPTO_SERVICE_MAC 2 +#define VIRTIO_CRYPTO_SERVICE_AEAD 3 #define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op)) struct virtio_crypto_ctrl_header { #define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02) #define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03) #define VIRTIO_CRYPTO_HASH_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02) #define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03) #define VIRTIO_CRYPTO_MAC_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02) #define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03) #define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) - __virtio32 opcode; - __virtio32 algo; - __virtio32 flag; - /* data virtqueue id */ - __virtio32 queue_id; + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) + uint32_t opcode; + uint32_t algo; + uint32_t flag; + /* data virtqueue id */ + uint32_t queue_id; }; struct virtio_crypto_cipher_session_para { @@ -78,26 +80,27 @@ struct virtio_crypto_cipher_session_para { #define VIRTIO_CRYPTO_CIPHER_AES_F8 12 #define VIRTIO_CRYPTO_CIPHER_AES_XTS 13 #define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14 - __virtio32 algo; - /* length of key */ - __virtio32 keylen; + uint32_t algo; + /* length of key */ + uint32_t keylen; #define VIRTIO_CRYPTO_OP_ENCRYPT 1 #define VIRTIO_CRYPTO_OP_DECRYPT 2 - /* encrypt or decrypt */ - __virtio32 op; - __virtio32 padding; + /* encrypt or decrypt */ + uint32_t op; + uint32_t padding; }; struct virtio_crypto_session_input { - /* Device-writable part */ - __virtio64 session_id; - __virtio32 status; - __virtio32 padding; + /* Device-writable part */ + uint64_t session_id; + uint32_t status; + uint32_t padding; }; struct virtio_crypto_cipher_session_req { - struct virtio_crypto_cipher_session_para para; + struct virtio_crypto_cipher_session_para para; + uint8_t 
padding[32]; }; struct virtio_crypto_hash_session_para { @@ -114,13 +117,15 @@ struct virtio_crypto_hash_session_para { #define VIRTIO_CRYPTO_HASH_SHA3_512 10 #define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11 #define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12 - __virtio32 algo; - /* hash result length */ - __virtio32 hash_result_len; + uint32_t algo; + /* hash result length */ + uint32_t hash_result_len; + uint8_t padding[8]; }; struct virtio_crypto_hash_create_session_req { - struct virtio_crypto_hash_session_para para; + struct virtio_crypto_hash_session_para para; + uint8_t padding[40]; }; struct virtio_crypto_mac_session_para { @@ -140,16 +145,17 @@ struct virtio_crypto_mac_session_para { #define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49 #define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50 #define VIRTIO_CRYPTO_MAC_XCBC_AES 53 - __virtio32 algo; - /* hash result length */ - __virtio32 hash_result_len; - /* length of authenticated key */ - __virtio32 auth_key_len; - __virtio32 padding; + uint32_t algo; + /* hash result length */ + uint32_t hash_result_len; + /* length of authenticated key */ + uint32_t auth_key_len; + uint32_t padding; }; struct virtio_crypto_mac_create_session_req { - struct virtio_crypto_mac_session_para para; + struct virtio_crypto_mac_session_para para; + uint8_t padding[40]; }; struct virtio_crypto_aead_session_para { @@ -157,273 +163,288 @@ struct virtio_crypto_aead_session_para { #define VIRTIO_CRYPTO_AEAD_GCM 1 #define VIRTIO_CRYPTO_AEAD_CCM 2 #define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3 - __virtio32 algo; - /* length of key */ - __virtio32 key_len; - /* digest result length */ - __virtio32 digest_result_len; - /* length of the additional authenticated data (AAD) in bytes */ - __virtio32 aad_len; - /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */ - __virtio32 op; - __virtio32 padding; + uint32_t algo; + /* length of key */ + uint32_t key_len; + /* hash result length */ + uint32_t hash_result_len; + /* length of the additional authenticated data (AAD) in bytes */ + uint32_t aad_len; + /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */ + uint32_t op; + uint32_t padding; }; struct virtio_crypto_aead_create_session_req { - struct virtio_crypto_aead_session_para para; + struct virtio_crypto_aead_session_para para; + uint8_t padding[32]; }; struct virtio_crypto_alg_chain_session_para { #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2 - __virtio32 alg_chain_order; + uint32_t alg_chain_order; /* Plain hash */ #define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1 /* Authenticated hash (mac) */ #define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2 /* Nested hash */ #define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3 - __virtio32 hash_mode; - struct virtio_crypto_cipher_session_para cipher_param; - union { - struct virtio_crypto_hash_session_para hash_param; - struct virtio_crypto_mac_session_para mac_param; - } u; - /* length of the additional authenticated data (AAD) in bytes */ - __virtio32 aad_len; - __virtio32 padding; + uint32_t hash_mode; + struct virtio_crypto_cipher_session_para cipher_param; + union { + struct virtio_crypto_hash_session_para hash_param; + struct virtio_crypto_mac_session_para mac_param; + uint8_t padding[16]; + } u; + /* length of the additional authenticated data (AAD) in bytes */ + uint32_t aad_len; + uint32_t padding; }; struct virtio_crypto_alg_chain_session_req { - struct virtio_crypto_alg_chain_session_para para; + struct virtio_crypto_alg_chain_session_para para; }; struct virtio_crypto_sym_create_session_req { - 
union { - struct virtio_crypto_cipher_session_req cipher; - struct virtio_crypto_alg_chain_session_req chain; - } u; + union { + struct virtio_crypto_cipher_session_req cipher; + struct virtio_crypto_alg_chain_session_req chain; + uint8_t padding[48]; + } u; - /* Device-readable part */ + /* Device-readable part */ /* No operation */ #define VIRTIO_CRYPTO_SYM_OP_NONE 0 /* Cipher only operation on the data */ #define VIRTIO_CRYPTO_SYM_OP_CIPHER 1 -/* Chain any cipher with any hash or mac operation. The order - depends on the value of alg_chain_order param */ +/* + * Chain any cipher with any hash or mac operation. The order + * depends on the value of alg_chain_order param + */ #define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2 - __virtio32 op_type; - __virtio32 padding; + uint32_t op_type; + uint32_t padding; }; struct virtio_crypto_destroy_session_req { - /* Device-readable part */ - __virtio64 session_id; + /* Device-readable part */ + uint64_t session_id; + uint8_t padding[48]; }; -/* The request of the control viritqueue's packet */ +/* The request of the control virtqueue's packet */ struct virtio_crypto_op_ctrl_req { - struct virtio_crypto_ctrl_header header; - - union { - struct virtio_crypto_sym_create_session_req sym_create_session; - struct virtio_crypto_hash_create_session_req hash_create_session; - struct virtio_crypto_mac_create_session_req mac_create_session; - struct virtio_crypto_aead_create_session_req aead_create_session; - struct virtio_crypto_destroy_session_req destroy_session; - } u; + struct virtio_crypto_ctrl_header header; + + union { + struct virtio_crypto_sym_create_session_req + sym_create_session; + struct virtio_crypto_hash_create_session_req + hash_create_session; + struct virtio_crypto_mac_create_session_req + mac_create_session; + struct virtio_crypto_aead_create_session_req + aead_create_session; + struct virtio_crypto_destroy_session_req + destroy_session; + uint8_t padding[56]; + } u; }; struct virtio_crypto_op_header { #define VIRTIO_CRYPTO_CIPHER_ENCRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00) #define VIRTIO_CRYPTO_CIPHER_DECRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01) #define VIRTIO_CRYPTO_HASH \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00) #define VIRTIO_CRYPTO_MAC \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00) #define VIRTIO_CRYPTO_AEAD_ENCRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) #define VIRTIO_CRYPTO_AEAD_DECRYPT \ - VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) - __virtio32 opcode; - /* algo should be service-specific algorithms */ - __virtio32 algo; - /* session_id should be service-specific algorithms */ - __virtio64 session_id; - /* control flag to control the request */ - __virtio32 flag; - __virtio32 padding; + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) + uint32_t opcode; + /* algo should be service-specific algorithms */ + uint32_t algo; + /* session_id should be service-specific algorithms */ + uint64_t session_id; + /* control flag to control the request */ + uint32_t flag; + uint32_t padding; }; struct virtio_crypto_cipher_para { - /* - * Byte Length of valid IV/Counter - * - * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for - 
* SNOW3G in UEA2 mode, this is the length of the IV (which - * must be the same as the block length of the cipher). - * - For block ciphers in CTR mode, this is the length of the counter - * (which must be the same as the block length of the cipher). - * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007. - * - * The IV/Counter will be updated after every partial cryptographic - * operation. - */ - __virtio32 iv_len; - /* length of source data */ - __virtio32 src_data_len; - /* length of dst data */ - __virtio32 dst_data_len; - __virtio32 padding; + /* + * Byte Length of valid IV/Counter + * + * For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for + * SNOW3G in UEA2 mode, this is the length of the IV (which + * must be the same as the block length of the cipher). + * For block ciphers in CTR mode, this is the length of the counter + * (which must be the same as the block length of the cipher). + * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007. + * + * The IV/Counter will be updated after every partial cryptographic + * operation. + */ + uint32_t iv_len; + /* length of source data */ + uint32_t src_data_len; + /* length of dst data */ + uint32_t dst_data_len; + uint32_t padding; }; struct virtio_crypto_hash_para { - /* length of source data */ - __virtio32 src_data_len; - /* hash result length */ - __virtio32 hash_result_len; + /* length of source data */ + uint32_t src_data_len; + /* hash result length */ + uint32_t hash_result_len; }; struct virtio_crypto_mac_para { - struct virtio_crypto_hash_para hash; + struct virtio_crypto_hash_para hash; }; struct virtio_crypto_aead_para { - /* - * Byte Length of valid IV data pointed to by the below iv_addr - * parameter. - * - * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which - * case iv_addr points to J0. - * - For CCM mode, this is the length of the nonce, which can be in the - * range 7 to 13 inclusive. - */ - __virtio32 iv_len; - /* length of additional auth data */ - __virtio32 aad_len; - /* length of source data */ - __virtio32 src_data_len; - /* length of dst data */ - __virtio32 dst_data_len; + /* + * Byte Length of valid IV data pointed to by the below iv_addr + * parameter. + * + * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which + * case iv_addr points to J0. + * For CCM mode, this is the length of the nonce, which can be in the + * range 7 to 13 inclusive. 
+ */ + uint32_t iv_len; + /* length of additional auth data */ + uint32_t aad_len; + /* length of source data */ + uint32_t src_data_len; + /* length of dst data */ + uint32_t dst_data_len; }; struct virtio_crypto_cipher_data_req { - /* Device-readable part */ - struct virtio_crypto_cipher_para para; + /* Device-readable part */ + struct virtio_crypto_cipher_para para; + uint8_t padding[24]; }; struct virtio_crypto_hash_data_req { - /* Device-readable part */ - struct virtio_crypto_hash_para para; + /* Device-readable part */ + struct virtio_crypto_hash_para para; + uint8_t padding[40]; }; struct virtio_crypto_mac_data_req { - /* Device-readable part */ - struct virtio_crypto_mac_para para; + /* Device-readable part */ + struct virtio_crypto_mac_para para; + uint8_t padding[40]; }; struct virtio_crypto_alg_chain_data_para { - __virtio32 iv_len; - /* Length of source data */ - __virtio32 src_data_len; - /* Length of destination data */ - __virtio32 dst_data_len; - /* Starting point for cipher processing in source data */ - __virtio32 cipher_start_src_offset; - /* Length of the source data that the cipher will be computed on */ - __virtio32 len_to_cipher; - /* Starting point for hash processing in source data */ - __virtio32 hash_start_src_offset; - /* Length of the source data that the hash will be computed on */ - __virtio32 len_to_hash; - /* Length of the additional auth data */ - __virtio32 aad_len; - /* Length of the hash result */ - __virtio32 hash_result_len; - __virtio32 reserved; + uint32_t iv_len; + /* Length of source data */ + uint32_t src_data_len; + /* Length of destination data */ + uint32_t dst_data_len; + /* Starting point for cipher processing in source data */ + uint32_t cipher_start_src_offset; + /* Length of the source data that the cipher will be computed on */ + uint32_t len_to_cipher; + /* Starting point for hash processing in source data */ + uint32_t hash_start_src_offset; + /* Length of the source data that the hash will be computed on */ + uint32_t len_to_hash; + /* Length of the additional auth data */ + uint32_t aad_len; + /* Length of the hash result */ + uint32_t hash_result_len; + uint32_t reserved; }; struct virtio_crypto_alg_chain_data_req { - /* Device-readable part */ - struct virtio_crypto_alg_chain_data_para para; + /* Device-readable part */ + struct virtio_crypto_alg_chain_data_para para; }; struct virtio_crypto_sym_data_req { - union { - struct virtio_crypto_cipher_data_req cipher; - struct virtio_crypto_alg_chain_data_req chain; - } u; - - /* See above VIRTIO_CRYPTO_SYM_OP_* */ - __virtio32 op_type; - __virtio32 padding; + union { + struct virtio_crypto_cipher_data_req cipher; + struct virtio_crypto_alg_chain_data_req chain; + uint8_t padding[40]; + } u; + + /* See above VIRTIO_CRYPTO_SYM_OP_* */ + uint32_t op_type; + uint32_t padding; }; struct virtio_crypto_aead_data_req { - /* Device-readable part */ - struct virtio_crypto_aead_para para; + /* Device-readable part */ + struct virtio_crypto_aead_para para; + uint8_t padding[32]; }; -/* The request of the data viritqueue's packet */ +/* The request of the data virtqueue's packet */ struct virtio_crypto_op_data_req { - struct virtio_crypto_op_header header; - - union { - struct virtio_crypto_sym_data_req sym_req; - struct virtio_crypto_hash_data_req hash_req; - struct virtio_crypto_mac_data_req mac_req; - struct virtio_crypto_aead_data_req aead_req; - } u; + struct virtio_crypto_op_header header; + + union { + struct virtio_crypto_sym_data_req sym_req; + struct virtio_crypto_hash_data_req hash_req; 
+ struct virtio_crypto_mac_data_req mac_req; + struct virtio_crypto_aead_data_req aead_req; + uint8_t padding[48]; + } u; }; #define VIRTIO_CRYPTO_OK 0 #define VIRTIO_CRYPTO_ERR 1 #define VIRTIO_CRYPTO_BADMSG 2 #define VIRTIO_CRYPTO_NOTSUPP 3 -#define VIRTIO_CRYPTO_INVSESS 4 /* Invaild session id */ +#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ /* The accelerator hardware is ready */ #define VIRTIO_CRYPTO_S_HW_READY (1 << 0) -#define VIRTIO_CRYPTO_S_STARTED (1 << 1) struct virtio_crypto_config { - /* See VIRTIO_CRYPTO_* above */ - __virtio32 status; - - /* - * Maximum number of data queue legal values are between 1 and 0x8000 - */ - __virtio32 max_dataqueues; - - /* Specifies the services mask which the devcie support, - see VIRTIO_CRYPTO_SERVICE_* above */ - __virtio32 crypto_services; - - /* Detailed algorithms mask */ - __virtio32 cipher_algo_l; - __virtio32 cipher_algo_h; - __virtio32 hash_algo; - __virtio32 mac_algo_l; - __virtio32 mac_algo_h; - __virtio32 aead_algo; - - /* Maximum length of cipher key */ - uint32_t max_cipher_key_len; - /* Maximum length of authenticated key */ - uint32_t max_auth_key_len; - - __virtio32 reserve; - - /* The maximum size of per request's content */ - __virtio64 max_size; + /* See VIRTIO_CRYPTO_OP_* above */ + uint32_t status; + + /* + * Maximum number of data queue + */ + uint32_t max_dataqueues; + + /* + * Specifies the services mask which the device support, + * see VIRTIO_CRYPTO_SERVICE_* above + */ + uint32_t crypto_services; + + /* Detailed algorithms mask */ + uint32_t cipher_algo_l; + uint32_t cipher_algo_h; + uint32_t hash_algo; + uint32_t mac_algo_l; + uint32_t mac_algo_h; + uint32_t aead_algo; + /* Maximum length of cipher key */ + uint32_t max_cipher_key_len; + /* Maximum length of authenticated key */ + uint32_t max_auth_key_len; + uint32_t reserve; + /* Maximum size of each crypto request's content */ + uint64_t max_size; }; struct virtio_crypto_inhdr { - /* See VIRTIO_CRYPTO_* above */ - uint8_t status; + /* See VIRTIO_CRYPTO_* above */ + uint8_t status; }; - -#endif /* _LINUX_VIRTIO_CRYPTO_H */ +#endif diff --git a/include/standard-headers/linux/virtio_mmio.h b/include/standard-headers/linux/virtio_mmio.h new file mode 100644 index 0000000000..c4b09689ab --- /dev/null +++ b/include/standard-headers/linux/virtio_mmio.h @@ -0,0 +1,141 @@ +/* + * Virtio platform device driver + * + * Copyright 2011, ARM Ltd. + * + * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _LINUX_VIRTIO_MMIO_H +#define _LINUX_VIRTIO_MMIO_H + +/* + * Control registers + */ + +/* Magic value ("virt" string) - Read Only */ +#define VIRTIO_MMIO_MAGIC_VALUE 0x000 + +/* Virtio device version - Read Only */ +#define VIRTIO_MMIO_VERSION 0x004 + +/* Virtio device ID - Read Only */ +#define VIRTIO_MMIO_DEVICE_ID 0x008 + +/* Virtio vendor ID - Read Only */ +#define VIRTIO_MMIO_VENDOR_ID 0x00c + +/* Bitmask of the features supported by the device (host) + * (32 bits per set) - Read Only */ +#define VIRTIO_MMIO_DEVICE_FEATURES 0x010 + +/* Device (host) features set selector - Write Only */ +#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014 + +/* Bitmask of features activated by the driver (guest) + * (32 bits per set) - Write Only */ +#define VIRTIO_MMIO_DRIVER_FEATURES 0x020 + +/* Activated features set selector - Write Only */ +#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024 + + +#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */ + +/* Guest's memory page size in bytes - Write Only */ +#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 + +#endif + + +/* Queue selector - Write Only */ +#define VIRTIO_MMIO_QUEUE_SEL 0x030 + +/* Maximum size of the currently selected queue - Read Only */ +#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034 + +/* Queue size for the currently selected queue - Write Only */ +#define VIRTIO_MMIO_QUEUE_NUM 0x038 + + +#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! 
*/ + +/* Used Ring alignment for the currently selected queue - Write Only */ +#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c + +/* Guest's PFN for the currently selected queue - Read Write */ +#define VIRTIO_MMIO_QUEUE_PFN 0x040 + +#endif + + +/* Ready bit for the currently selected queue - Read Write */ +#define VIRTIO_MMIO_QUEUE_READY 0x044 + +/* Queue notifier - Write Only */ +#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 + +/* Interrupt status - Read Only */ +#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060 + +/* Interrupt acknowledge - Write Only */ +#define VIRTIO_MMIO_INTERRUPT_ACK 0x064 + +/* Device status register - Read Write */ +#define VIRTIO_MMIO_STATUS 0x070 + +/* Selected queue's Descriptor Table address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080 +#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084 + +/* Selected queue's Available Ring address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090 +#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094 + +/* Selected queue's Used Ring address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0 +#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4 + +/* Configuration atomicity value */ +#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc + +/* The config space is defined by each driver as + * the per-driver configuration space - Read Write */ +#define VIRTIO_MMIO_CONFIG 0x100 + + + +/* + * Interrupt flags (re: interrupt status & acknowledge registers) + */ + +#define VIRTIO_MMIO_INT_VRING (1 << 0) +#define VIRTIO_MMIO_INT_CONFIG (1 << 1) + +#endif diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h index 1c9dad1b72..2bf16b203c 100644 --- a/include/sysemu/arch_init.h +++ b/include/sysemu/arch_init.h @@ -23,13 +23,12 @@ enum { QEMU_ARCH_UNICORE32 = (1 << 14), QEMU_ARCH_MOXIE = (1 << 15), QEMU_ARCH_TRICORE = (1 << 16), + QEMU_ARCH_NIOS2 = (1 << 17), }; extern const uint32_t arch_type; void select_soundhw(const char *optarg); -void do_acpitable_option(const QemuOpts *opts); -void do_smbios_option(QemuOpts *opts); void audio_init(void); int kvm_available(void); int xen_available(void); diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h index 84526c0d35..a9d0d1ee25 100644 --- a/include/sysemu/cryptodev.h +++ b/include/sysemu/cryptodev.h @@ -202,6 +202,8 @@ struct CryptoDevBackend { Object parent_obj; bool ready; + /* Tag the cryptodev backend is used by virtio-crypto or not */ + bool is_used; CryptoDevBackendConf conf; }; @@ -295,4 +297,44 @@ int cryptodev_backend_crypto_operation( void *opaque, uint32_t queue_index, Error **errp); +/** + * cryptodev_backend_set_used: + * @backend: the cryptodev backend object + * @used: ture or false + * + * Set the cryptodev backend is used by virtio-crypto or not + */ +void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used); + +/** + * cryptodev_backend_is_used: + * @backend: the cryptodev backend object + * + * Return the status that the cryptodev backend is used + * by virtio-crypto or not + * + * Returns: true on used, or false on not used + */ +bool cryptodev_backend_is_used(CryptoDevBackend *backend); + +/** + * cryptodev_backend_set_ready: + * @backend: the cryptodev backend object + * @ready: ture or false + * + * Set the cryptodev backend is ready or not, which is called + * by the children of the cryptodev banckend interface. 
+ */ +void cryptodev_backend_set_ready(CryptoDevBackend *backend, bool ready); + +/** + * cryptodev_backend_is_ready: + * @backend: the cryptodev backend object + * + * Return the status that the cryptodev backend is ready or not + * + * Returns: true on ready, or false on not ready + */ +bool cryptodev_backend_is_ready(CryptoDevBackend *backend); + #endif /* CRYPTODEV_H */ diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h new file mode 100644 index 0000000000..d9f023918e --- /dev/null +++ b/include/sysemu/hax.h @@ -0,0 +1,56 @@ +/* + * QEMU HAXM support + * + * Copyright IBM, Corp. 2008 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * Copyright (c) 2011 Intel Corporation + * Written by: + * Jiang Yunhong<yunhong.jiang@intel.com> + * Xin Xiaohui<xiaohui.xin@intel.com> + * Zhang Xiantao<xiantao.zhang@intel.com> + * + * Copyright 2016 Google, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_HAX_H +#define QEMU_HAX_H + +#include "config-host.h" +#include "qemu-common.h" + +int hax_sync_vcpus(void); +int hax_init_vcpu(CPUState *cpu); +int hax_smp_cpu_exec(CPUState *cpu); +int hax_populate_ram(uint64_t va, uint32_t size); + +void hax_cpu_synchronize_state(CPUState *cpu); +void hax_cpu_synchronize_post_reset(CPUState *cpu); +void hax_cpu_synchronize_post_init(CPUState *cpu); + +#ifdef CONFIG_HAX + +int hax_enabled(void); + +#include "hw/hw.h" +#include "qemu/bitops.h" +#include "exec/memory.h" +int hax_vcpu_destroy(CPUState *cpu); +void hax_raise_event(CPUState *cpu); +void hax_reset_vcpu_state(void *opaque); +#include "target/i386/hax-interface.h" +#include "target/i386/hax-i386.h" + +#else /* CONFIG_HAX */ + +#define hax_enabled() (0) + +#endif /* CONFIG_HAX */ + +#endif /* QEMU_HAX_H */ diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h index 678232af40..ecae0cff19 100644 --- a/include/sysemu/hostmem.h +++ b/include/sysemu/hostmem.h @@ -52,6 +52,7 @@ struct HostMemoryBackend { Object parent; /* protected */ + char *id; uint64_t size; bool merge, dump; bool prealloc, force_prealloc, is_mapped; diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h new file mode 100644 index 0000000000..c9b3105bc7 --- /dev/null +++ b/include/sysemu/hw_accel.h @@ -0,0 +1,48 @@ +/* + * QEMU Hardware accelertors support + * + * Copyright 2016 Google, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ *
+ */
+
+#ifndef QEMU_HW_ACCEL_H
+#define QEMU_HW_ACCEL_H
+
+#include "qom/cpu.h"
+#include "sysemu/hax.h"
+#include "sysemu/kvm.h"
+
+static inline void cpu_synchronize_state(CPUState *cpu)
+{
+    if (kvm_enabled()) {
+        kvm_cpu_synchronize_state(cpu);
+    }
+    if (hax_enabled()) {
+        hax_cpu_synchronize_state(cpu);
+    }
+}
+
+static inline void cpu_synchronize_post_reset(CPUState *cpu)
+{
+    if (kvm_enabled()) {
+        kvm_cpu_synchronize_post_reset(cpu);
+    }
+    if (hax_enabled()) {
+        hax_cpu_synchronize_post_reset(cpu);
+    }
+}
+
+static inline void cpu_synchronize_post_init(CPUState *cpu)
+{
+    if (kvm_enabled()) {
+        kvm_cpu_synchronize_post_init(cpu);
+    }
+    if (hax_enabled()) {
+        hax_cpu_synchronize_post_init(cpu);
+    }
+}
+
+#endif /* QEMU_HW_ACCEL_H */
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index 68ac2de83a..e6da1a4087 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -28,6 +28,11 @@ typedef struct {
    QemuCond init_done_cond;    /* is thread initialization done? */
    bool stopping;
    int thread_id;
+
+    /* AioContext poll parameters */
+    int64_t poll_max_ns;
+    int64_t poll_grow;
+    int64_t poll_shrink;
 } IOThread;
 
 #define IOTHREAD(obj) \
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index df67cc0672..3045ee7678 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -461,29 +461,6 @@ void kvm_cpu_synchronize_state(CPUState *cpu);
 void kvm_cpu_synchronize_post_reset(CPUState *cpu);
 void kvm_cpu_synchronize_post_init(CPUState *cpu);
 
-/* generic hooks - to be moved/refactored once there are more users */
-
-static inline void cpu_synchronize_state(CPUState *cpu)
-{
-    if (kvm_enabled()) {
-        kvm_cpu_synchronize_state(cpu);
-    }
-}
-
-static inline void cpu_synchronize_post_reset(CPUState *cpu)
-{
-    if (kvm_enabled()) {
-        kvm_cpu_synchronize_post_reset(cpu);
-    }
-}
-
-static inline void cpu_synchronize_post_init(CPUState *cpu)
-{
-    if (kvm_enabled()) {
-        kvm_cpu_synchronize_post_init(cpu);
-    }
-}
-
 /**
  * kvm_irqchip_add_msi_route - Add MSI route for specific vector
  * @s: KVM state
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
index 4da808a6e9..8f09dcf918 100644
--- a/include/sysemu/numa.h
+++ b/include/sysemu/numa.h
@@ -17,7 +17,7 @@ struct numa_addr_range {
 
 typedef struct node_info {
     uint64_t node_mem;
-    DECLARE_BITMAP(node_cpu, MAX_CPUMASK_BITS);
+    unsigned long *node_cpu;
     struct HostMemoryBackend *node_memdev;
     bool present;
     QLIST_HEAD(, numa_addr_range) addr; /* List to store address ranges */
diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h
index f80d6d28e8..abb35ca8c9 100644
--- a/include/sysemu/replay.h
+++ b/include/sysemu/replay.h
@@ -39,6 +39,8 @@ enum ReplayCheckpoint {
 };
 typedef enum ReplayCheckpoint ReplayCheckpoint;
 
+typedef struct ReplayNetState ReplayNetState;
+
 extern ReplayMode replay_mode;
 
 /* Replay process control functions */
@@ -137,4 +139,14 @@ void replay_char_read_all_save_error(int res);
 /*! Writes character read_all execution result into the replay log. */
 void replay_char_read_all_save_buf(uint8_t *buf, int offset);
 
+/* Network */
+
+/*! Registers replay network filter attached to some backend. */
+ReplayNetState *replay_register_net(NetFilterState *nfs);
+/*! Unregisters replay network filter. */
+void replay_unregister_net(ReplayNetState *rns);
+/*! Called to write network packet to the replay log. */
+void replay_net_packet_event(ReplayNetState *rns, unsigned flags,
+                             const struct iovec *iov, int iovcnt);
+
 #endif
diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h
new file mode 100644
index 0000000000..0b0d6d7598
--- /dev/null
+++ b/include/sysemu/reset.h
@@ -0,0 +1,10 @@
+#ifndef QEMU_SYSEMU_RESET_H
+#define QEMU_SYSEMU_RESET_H
+
+typedef void QEMUResetHandler(void *opaque);
+
+void qemu_register_reset(QEMUResetHandler *func, void *opaque);
+void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
+void qemu_devices_reset(void);
+
+#endif
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 66c6f1577e..ff8ffb5e47 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -63,7 +63,6 @@ void qemu_system_vmstop_request_prepare(void);
 int qemu_shutdown_requested_get(void);
 int qemu_reset_requested_get(void);
 void qemu_system_killed(int signal, pid_t pid);
-void qemu_devices_reset(void);
 void qemu_system_reset(bool report);
 void qemu_system_guest_panicked(void);
 size_t qemu_target_page_bits(void);
@@ -168,13 +167,6 @@ extern int mem_prealloc;
 #define MAX_NODES 128
 #define NUMA_NODE_UNASSIGNED MAX_NODES
 
-/* The following shall be true for all CPUs:
- * cpu->cpu_index < max_cpus <= MAX_CPUMASK_BITS
- *
- * Note that cpu->get_arch_id() may be larger than MAX_CPUMASK_BITS.
- */
-#define MAX_CPUMASK_BITS 288
-
 #define MAX_OPTION_ROMS 16
 typedef struct QEMUOptionRom {
     const char *name;
diff --git a/include/ui/console.h b/include/ui/console.h
index e2589e2134..b59e7b8c15 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -337,7 +337,10 @@ static inline pixman_format_code_t surface_format(DisplaySurface *s)
 }
 
 #ifdef CONFIG_CURSES
+/* KEY_EVENT is defined in wincon.h and in curses.h. Avoid redefinition. */
+#undef KEY_EVENT
 #include <curses.h>
+#undef KEY_EVENT
 typedef chtype console_ch_t;
 extern chtype vga_to_curses[];
 #else
@@ -394,6 +397,10 @@ uint32_t qemu_console_get_head(QemuConsole *con);
 QemuUIInfo *qemu_console_get_ui_info(QemuConsole *con);
 int qemu_console_get_width(QemuConsole *con, int fallback);
 int qemu_console_get_height(QemuConsole *con, int fallback);
+/* Return the low-level window id for the console */
+int qemu_console_get_window_id(QemuConsole *con);
+/* Set the low-level window id for the console */
+void qemu_console_set_window_id(QemuConsole *con, int window_id);
 void console_select(unsigned int index);
 void qemu_console_resize(QemuConsole *con, int width, int height);
diff --git a/include/ui/gtk.h b/include/ui/gtk.h
index 42ca0fea8b..b3b50059c7 100644
--- a/include/ui/gtk.h
+++ b/include/ui/gtk.h
@@ -18,6 +18,10 @@
 #include <X11/XKBlib.h>
 #endif
 
+#ifdef GDK_WINDOWING_WAYLAND
+#include <gdk/gdkwayland.h>
+#endif
+
 #if defined(CONFIG_OPENGL)
 #include "ui/egl-helpers.h"
 #include "ui/egl-context.h"
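
The new cryptodev_backend_set_used()/cryptodev_backend_is_used() pair in include/sysemu/cryptodev.h records whether a virtio-crypto device has claimed the backend. A minimal sketch of how a caller might use it; the claim helper below is hypothetical and not part of this patch:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "sysemu/cryptodev.h"

    /* Hypothetical helper: let one virtio-crypto device claim a backend. */
    static void virtio_crypto_claim_backend(CryptoDevBackend *backend,
                                            Error **errp)
    {
        if (cryptodev_backend_is_used(backend)) {
            error_setg(errp, "cryptodev backend is already in use");
            return;
        }
        cryptodev_backend_set_used(backend, true);
    }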
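
The cpu_synchronize_*() helpers move from include/sysemu/kvm.h into the new include/sysemu/hw_accel.h so they can dispatch to whichever accelerator (KVM or HAXM) is active. A sketch of the intended call pattern, with a purely illustrative helper name:

    #include "qemu/osdep.h"
    #include "sysemu/hw_accel.h"

    /* Hypothetical debug helper: refresh QEMU's copy of the vCPU state
     * before inspecting it, whichever accelerator is in use. */
    static void my_dump_cpu(CPUState *cpu)
    {
        cpu_synchronize_state(cpu);
        /* ... read target-specific register state here ... */
    }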
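
Likewise, the new include/sysemu/reset.h gathers the reset-handler declarations (qemu_devices_reset() is dropped from sysemu.h in the same series). A minimal usage sketch; the device type and callbacks are hypothetical:

    #include "qemu/osdep.h"
    #include "sysemu/reset.h"

    typedef struct MyDeviceState {
        int irq_level;                  /* hypothetical device state */
    } MyDeviceState;

    static void my_device_reset(void *opaque)
    {
        MyDeviceState *s = opaque;
        s->irq_level = 0;               /* back to power-on defaults */
    }

    static void my_device_init(MyDeviceState *s)
    {
        /* Run my_device_reset(s) on every system reset. */
        qemu_register_reset(my_device_reset, s);
    }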