Diffstat (limited to 'include')
-rw-r--r--  include/block/block.h                     5
-rw-r--r--  include/exec/cpu-all.h                    5
-rw-r--r--  include/exec/cpu-common.h                 3
-rw-r--r--  include/exec/cpu-defs.h                   1
-rw-r--r--  include/exec/ioport.h                    30
-rw-r--r--  include/exec/iorange.h                   31
-rw-r--r--  include/exec/memory-internal.h            2
-rw-r--r--  include/exec/memory.h                   156
-rw-r--r--  include/hw/i386/pc.h                      6
-rw-r--r--  include/hw/pci-host/pam.h                 4
-rw-r--r--  include/hw/pci/pci.h                      2
-rw-r--r--  include/hw/ppc/mac_dbdma.h              124
-rw-r--r--  include/hw/ppc/ppc.h                      2
-rw-r--r--  include/hw/ppc/ppc_e500.h                 6
-rw-r--r--  include/hw/ppc/spapr.h                    3
-rw-r--r--  include/hw/virtio/dataplane/hostmem.h     1
-rw-r--r--  include/migration/migration.h             2
-rw-r--r--  include/qemu-common.h                     1
-rw-r--r--  include/qemu/atomic.h                   198
-rw-r--r--  include/qemu/error-report.h               2
-rw-r--r--  include/qemu/int128.h                    25
-rw-r--r--  include/qemu/log.h                       26
-rw-r--r--  include/qom/cpu.h                        20
-rw-r--r--  include/sysemu/dma.h                      4
-rw-r--r--  include/sysemu/kvm.h                     10
-rw-r--r--  include/sysemu/sysemu.h                   6
26 files changed, 498 insertions, 177 deletions
diff --git a/include/block/block.h b/include/block/block.h
index dd8eca1be1..b6b9014a9c 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -111,7 +111,8 @@ bool bdrv_io_limits_enabled(BlockDriverState *bs);
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
-BlockDriver *bdrv_find_protocol(const char *filename);
+BlockDriver *bdrv_find_protocol(const char *filename,
+ bool allow_protocol_prefix);
BlockDriver *bdrv_find_format(const char *format_name);
BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
bool readonly);
@@ -266,7 +267,7 @@ void bdrv_clear_incoming_migration_all(void);
/* Ensure contents are flushed to disk. */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
-void bdrv_flush_all(void);
+int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain_all(void);
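
For illustration, a caller of the two changed prototypes might look like this (a minimal sketch; the surrounding function and its error policy are assumptions, only the bdrv_* signatures come from the hunk above):

static int demo_flush_with_driver(const char *filename)
{
    BlockDriver *drv;

    /* Protocol prefixes such as "nbd:" are honoured only when the caller
     * explicitly allows them. */
    drv = bdrv_find_protocol(filename, true);
    if (!drv) {
        return -1;
    }

    /* bdrv_flush_all() now reports the first flush error instead of void. */
    return bdrv_flush_all();
}
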
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 35bdf858f2..5084202217 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -20,7 +20,6 @@
#define CPU_ALL_H
#include "qemu-common.h"
-#include "qemu/tls.h"
#include "exec/cpu-common.h"
#include "qemu/thread.h"
@@ -357,9 +356,6 @@ CPUArchState *cpu_copy(CPUArchState *env);
void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
-extern CPUArchState *first_cpu;
-DECLARE_TLS(CPUArchState *,cpu_single_env);
-#define cpu_single_env tls_var(cpu_single_env)
/* Flags for use in ENV->INTERRUPT_PENDING.
@@ -447,7 +443,6 @@ hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);
/* memory API */
-extern int phys_ram_fd;
extern ram_addr_t ram_size;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 5240ae2ac2..e4996e19c3 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -52,8 +52,7 @@ typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
-int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
-ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
+MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index c4ac929875..39094b3f48 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -181,7 +181,6 @@ typedef struct CPUWatchpoint {
sigjmp_buf jmp_env; \
int exception_index; \
\
- CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
/* user data */ \
void *opaque; \
\
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
index fc28350a3c..bdd4e964eb 100644
--- a/include/exec/ioport.h
+++ b/include/exec/ioport.h
@@ -25,7 +25,8 @@
#define IOPORT_H
#include "qemu-common.h"
-#include "exec/iorange.h"
+#include "qom/object.h"
+#include "exec/memory.h"
typedef uint32_t pio_addr_t;
#define FMT_pioaddr PRIx32
@@ -33,18 +34,16 @@ typedef uint32_t pio_addr_t;
#define MAX_IOPORTS (64 * 1024)
#define IOPORTS_MASK (MAX_IOPORTS - 1)
-/* These should really be in isa.h, but are here to make pc.h happy. */
-typedef void (IOPortWriteFunc)(void *opaque, uint32_t address, uint32_t data);
-typedef uint32_t (IOPortReadFunc)(void *opaque, uint32_t address);
-typedef void (IOPortDestructor)(void *opaque);
+typedef struct MemoryRegionPortio {
+ uint32_t offset;
+ uint32_t len;
+ unsigned size;
+ uint32_t (*read)(void *opaque, uint32_t address);
+ void (*write)(void *opaque, uint32_t address, uint32_t data);
+ uint32_t base; /* private field */
+} MemoryRegionPortio;
-void ioport_register(IORange *iorange);
-int register_ioport_read(pio_addr_t start, int length, int size,
- IOPortReadFunc *func, void *opaque);
-int register_ioport_write(pio_addr_t start, int length, int size,
- IOPortWriteFunc *func, void *opaque);
-void isa_unassign_ioport(pio_addr_t start, int length);
-bool isa_is_ioport_assigned(pio_addr_t start);
+#define PORTIO_END_OF_LIST() { }
void cpu_outb(pio_addr_t addr, uint8_t val);
void cpu_outw(pio_addr_t addr, uint16_t val);
@@ -53,20 +52,17 @@ uint8_t cpu_inb(pio_addr_t addr);
uint16_t cpu_inw(pio_addr_t addr);
uint32_t cpu_inl(pio_addr_t addr);
-struct MemoryRegion;
-struct MemoryRegionPortio;
-
typedef struct PortioList {
const struct MemoryRegionPortio *ports;
+ Object *owner;
struct MemoryRegion *address_space;
unsigned nr;
struct MemoryRegion **regions;
- struct MemoryRegion **aliases;
void *opaque;
const char *name;
} PortioList;
-void portio_list_init(PortioList *piolist,
+void portio_list_init(PortioList *piolist, Object *owner,
const struct MemoryRegionPortio *callbacks,
void *opaque, const char *name);
void portio_list_destroy(PortioList *piolist);
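
A device using the relocated MemoryRegionPortio type could register its ports roughly as sketched below; the demo_* handlers and names are illustrative, only the structure layout and the portio_list_init() signature come from this hunk:

static uint32_t demo_port_read(void *opaque, uint32_t addr)
{
    return 0xff;                              /* hypothetical handler */
}

static void demo_port_write(void *opaque, uint32_t addr, uint32_t val)
{
}

static const MemoryRegionPortio demo_portio[] = {
    { 0, 4, 1, .read = demo_port_read, .write = demo_port_write },
    PORTIO_END_OF_LIST(),
};

static void demo_register_ports(Object *owner, PortioList *piolist, void *opaque)
{
    /* The owner object now keeps the backing MemoryRegions alive. */
    portio_list_init(piolist, owner, demo_portio, opaque, "demo");
}
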
diff --git a/include/exec/iorange.h b/include/exec/iorange.h
deleted file mode 100644
index cd980a8312..0000000000
--- a/include/exec/iorange.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef IORANGE_H
-#define IORANGE_H
-
-#include <stdint.h>
-
-typedef struct IORange IORange;
-typedef struct IORangeOps IORangeOps;
-
-struct IORangeOps {
- void (*read)(IORange *iorange, uint64_t offset, unsigned width,
- uint64_t *data);
- void (*write)(IORange *iorange, uint64_t offset, unsigned width,
- uint64_t data);
- void (*destructor)(IORange *iorange);
-};
-
-struct IORange {
- const IORangeOps *ops;
- uint64_t base;
- uint64_t len;
-};
-
-static inline void iorange_init(IORange *iorange, const IORangeOps *ops,
- uint64_t base, uint64_t len)
-{
- iorange->ops = ops;
- iorange->base = base;
- iorange->len = len;
-}
-
-#endif
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index 26689fe252..d0e063392a 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -119,8 +119,6 @@ static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int dirty_flags);
-extern const IORangeOps memory_region_iorange_ops;
-
#endif
#endif
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 2ddc3c5393..ebe0d24182 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -24,8 +24,6 @@
#include "exec/hwaddr.h"
#endif
#include "qemu/queue.h"
-#include "exec/iorange.h"
-#include "exec/ioport.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
@@ -33,7 +31,6 @@
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
typedef struct MemoryRegionOps MemoryRegionOps;
-typedef struct MemoryRegionPortio MemoryRegionPortio;
typedef struct MemoryRegionMmio MemoryRegionMmio;
/* Must match *_DIRTY_FLAGS in cpu-all.h. To be replaced with dynamic
@@ -48,14 +45,6 @@ struct MemoryRegionMmio {
CPUWriteMemoryFunc *write[3];
};
-/* Internal use; thunks between old-style IORange and MemoryRegions. */
-typedef struct MemoryRegionIORange MemoryRegionIORange;
-struct MemoryRegionIORange {
- IORange iorange;
- MemoryRegion *mr;
- hwaddr offset;
-};
-
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
/* See address_space_translate: bit 0 is read, bit 1 is write. */
@@ -126,10 +115,6 @@ struct MemoryRegionOps {
bool unaligned;
} impl;
- /* If .read and .write are not present, old_portio may be used for
- * backwards compatibility with old portio registration
- */
- const MemoryRegionPortio *old_portio;
/* If .read and .write are not present, old_mmio may be used for
* backwards compatibility with old mmio registration
*/
@@ -151,6 +136,7 @@ struct MemoryRegion {
const MemoryRegionOps *ops;
const MemoryRegionIOMMUOps *iommu_ops;
void *opaque;
+ struct Object *owner;
MemoryRegion *parent;
Int128 size;
hwaddr addr;
@@ -179,15 +165,38 @@ struct MemoryRegion {
NotifierList iommu_notify;
};
-struct MemoryRegionPortio {
- uint32_t offset;
- uint32_t len;
- unsigned size;
- IOPortReadFunc *read;
- IOPortWriteFunc *write;
-};
+typedef struct MemoryListener MemoryListener;
-#define PORTIO_END_OF_LIST() { }
+/**
+ * MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+ void (*begin)(MemoryListener *listener);
+ void (*commit)(MemoryListener *listener);
+ void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_global_start)(MemoryListener *listener);
+ void (*log_global_stop)(MemoryListener *listener);
+ void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ /* Lower = earlier (during add), later (during del) */
+ unsigned priority;
+ AddressSpace *address_space_filter;
+ QTAILQ_ENTRY(MemoryListener) link;
+};
/**
* AddressSpace: describes a mapping of addresses to #MemoryRegion objects
@@ -200,6 +209,9 @@ struct AddressSpace {
int ioeventfd_nb;
struct MemoryRegionIoeventfd *ioeventfds;
struct AddressSpaceDispatch *dispatch;
+ struct AddressSpaceDispatch *next_dispatch;
+ MemoryListener dispatch_listener;
+
QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};
@@ -223,39 +235,6 @@ struct MemoryRegionSection {
bool readonly;
};
-typedef struct MemoryListener MemoryListener;
-
-/**
- * MemoryListener: callbacks structure for updates to the physical memory map
- *
- * Allows a component to adjust to changes in the guest-visible memory map.
- * Use with memory_listener_register() and memory_listener_unregister().
- */
-struct MemoryListener {
- void (*begin)(MemoryListener *listener);
- void (*commit)(MemoryListener *listener);
- void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
- void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
- void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_global_start)(MemoryListener *listener);
- void (*log_global_stop)(MemoryListener *listener);
- void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, EventNotifier *e);
- void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, EventNotifier *e);
- void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
- hwaddr addr, hwaddr len);
- void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
- hwaddr addr, hwaddr len);
- /* Lower = earlier (during add), later (during del) */
- unsigned priority;
- AddressSpace *address_space_filter;
- QTAILQ_ENTRY(MemoryListener) link;
-};
-
/**
* memory_region_init: Initialize a memory region
*
@@ -263,12 +242,44 @@ struct MemoryListener {
* memory_region_add_subregion() to add subregions.
*
* @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
* @name: used for debugging; not visible to the user or ABI
* @size: size of the region; any subregions beyond this size will be clipped
*/
void memory_region_init(MemoryRegion *mr,
+ struct Object *owner,
const char *name,
uint64_t size);
+
+/**
+ * memory_region_ref: Add 1 to a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function adds a reference to the owner.
+ *
+ * All MemoryRegions must have an owner if they can disappear, even if the
+ * device they belong to operates exclusively under the BQL. This is because
+ * the region could be returned at any time by memory_region_find, and this
+ * is usually under guest control.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_ref(MemoryRegion *mr);
+
+/**
+ * memory_region_unref: Remove 1 from a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function removes a reference to the owner and possibly destroys it.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_unref(MemoryRegion *mr);
+
/**
* memory_region_init_io: Initialize an I/O memory region.
*
@@ -276,6 +287,7 @@ void memory_region_init(MemoryRegion *mr,
* if @size is nonzero, subregions will be clipped to @size.
*
* @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
* @ops: a structure containing read and write callbacks to be used when
* I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
@@ -283,6 +295,7 @@ void memory_region_init(MemoryRegion *mr,
* @size: size of the region.
*/
void memory_region_init_io(MemoryRegion *mr,
+ struct Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -293,10 +306,12 @@ void memory_region_init_io(MemoryRegion *mr,
* region will modify memory directly.
*
* @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
* @name: the name of the region.
* @size: size of the region.
*/
void memory_region_init_ram(MemoryRegion *mr,
+ struct Object *owner,
const char *name,
uint64_t size);
@@ -306,11 +321,13 @@ void memory_region_init_ram(MemoryRegion *mr,
* region will modify memory directly.
*
* @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
* @name: the name of the region.
* @size: size of the region.
* @ptr: memory to be mapped; must contain at least @size bytes.
*/
void memory_region_init_ram_ptr(MemoryRegion *mr,
+ struct Object *owner,
const char *name,
uint64_t size,
void *ptr);
@@ -320,6 +337,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
* part of another memory region.
*
* @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
* @name: used for debugging; not visible to the user or ABI
* @orig: the region to be referenced; @mr will be equivalent to
* @orig between @offset and @offset + @size - 1.
@@ -327,6 +345,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
* @size: size of the region.
*/
void memory_region_init_alias(MemoryRegion *mr,
+ struct Object *owner,
const char *name,
MemoryRegion *orig,
hwaddr offset,
@@ -337,11 +356,13 @@ void memory_region_init_alias(MemoryRegion *mr,
* handled via callbacks.
*
* @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
* @ops: callbacks for write access handling.
* @name: the name of the region.
* @size: size of the region.
*/
void memory_region_init_rom_device(MemoryRegion *mr,
+ struct Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -356,10 +377,12 @@ void memory_region_init_rom_device(MemoryRegion *mr,
* the memory API will cause an abort().
*
* @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
* @name: used for debugging; not visible to the user or ABI
* @size: size of the region.
*/
void memory_region_init_reservation(MemoryRegion *mr,
+ struct Object *owner,
const char *name,
uint64_t size);
@@ -371,11 +394,13 @@ void memory_region_init_reservation(MemoryRegion *mr,
* memory region.
*
* @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
* @ops: a function that translates addresses into the @target region
* @name: used for debugging; not visible to the user or ABI
* @size: size of the region.
*/
void memory_region_init_iommu(MemoryRegion *mr,
+ struct Object *owner,
const MemoryRegionIOMMUOps *ops,
const char *name,
uint64_t size);
@@ -390,6 +415,13 @@ void memory_region_init_iommu(MemoryRegion *mr,
void memory_region_destroy(MemoryRegion *mr);
/**
+ * memory_region_owner: get a memory region's owner.
+ *
+ * @mr: the memory region being queried.
+ */
+struct Object *memory_region_owner(MemoryRegion *mr);
+
+/**
* memory_region_size: get a memory region's size.
*
* @mr: the memory region being queried.
@@ -808,6 +840,18 @@ void memory_region_set_alias_offset(MemoryRegion *mr,
hwaddr offset);
/**
+ * memory_region_present: query whether an address is covered by
+ * a #MemoryRegion within @parent.
+ *
+ * Answer whether a #MemoryRegion within @parent covers the address
+ * @addr.
+ *
+ * @parent: a MemoryRegion within which @addr is a relative address
+ * @addr: the area within @parent to be searched
+ */
+bool memory_region_present(MemoryRegion *parent, hwaddr addr);
+
+/**
* memory_region_find: translate an address/size relative to a
* MemoryRegion into a #MemoryRegionSection.
*
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 751592a1e7..7fb97b08a2 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -3,7 +3,6 @@
#include "qemu-common.h"
#include "exec/memory.h"
-#include "exec/ioport.h"
#include "hw/isa/isa.h"
#include "hw/block/fdc.h"
#include "net/net.h"
@@ -69,11 +68,14 @@ typedef struct GSIState {
void gsi_handler(void *opaque, int n, int level);
/* vmport.c */
+typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
+
static inline void vmport_init(ISABus *bus)
{
isa_create_simple(bus, "vmport");
}
-void vmport_register(unsigned char command, IOPortReadFunc *func, void *opaque);
+
+void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque);
void vmmouse_get_data(uint32_t *data);
void vmmouse_set_data(const uint32_t *data);
diff --git a/include/hw/pci-host/pam.h b/include/hw/pci-host/pam.h
index 8e9e349b1d..a8b87b89a7 100644
--- a/include/hw/pci-host/pam.h
+++ b/include/hw/pci-host/pam.h
@@ -90,8 +90,8 @@ void smram_update(MemoryRegion *smram_region, uint8_t smram,
uint8_t smm_enabled);
void smram_set_smm(uint8_t *host_smm_enabled, int smm, uint8_t smram,
MemoryRegion *smram_region);
-void init_pam(MemoryRegion *ram, MemoryRegion *system, MemoryRegion *pci,
- PAMMemoryRegion *mem, uint32_t start, uint32_t size);
+void init_pam(DeviceState *dev, MemoryRegion *ram, MemoryRegion *system,
+ MemoryRegion *pci, PAMMemoryRegion *mem, uint32_t start, uint32_t size);
void pam_update(PAMMemoryRegion *mem, int idx, uint8_t val);
#endif /* QEMU_PAM_H */
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index 209dda4f79..ccec2bac31 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -705,7 +705,7 @@ static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
int alloc_hint)
{
- qemu_sglist_init(qsg, alloc_hint, pci_get_address_space(dev));
+ qemu_sglist_init(qsg, DEVICE(dev), alloc_hint, pci_get_address_space(dev));
}
extern const VMStateDescription vmstate_pci_device;
diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h
index 691263eede..90efd277e4 100644
--- a/include/hw/ppc/mac_dbdma.h
+++ b/include/hw/ppc/mac_dbdma.h
@@ -37,12 +37,136 @@ struct DBDMA_io {
int is_last;
int is_dma_out;
DBDMA_end dma_end;
+ /* DMA is in progress, don't start another one */
+ bool processing;
+ /* unaligned last sector of a request */
+ uint8_t remainder[0x200];
+ int remainder_len;
};
+/*
+ * DBDMA control/status registers. All little-endian.
+ */
+
+#define DBDMA_CONTROL 0x00
+#define DBDMA_STATUS 0x01
+#define DBDMA_CMDPTR_HI 0x02
+#define DBDMA_CMDPTR_LO 0x03
+#define DBDMA_INTR_SEL 0x04
+#define DBDMA_BRANCH_SEL 0x05
+#define DBDMA_WAIT_SEL 0x06
+#define DBDMA_XFER_MODE 0x07
+#define DBDMA_DATA2PTR_HI 0x08
+#define DBDMA_DATA2PTR_LO 0x09
+#define DBDMA_RES1 0x0A
+#define DBDMA_ADDRESS_HI 0x0B
+#define DBDMA_BRANCH_ADDR_HI 0x0C
+#define DBDMA_RES2 0x0D
+#define DBDMA_RES3 0x0E
+#define DBDMA_RES4 0x0F
+
+#define DBDMA_REGS 16
+#define DBDMA_SIZE (DBDMA_REGS * sizeof(uint32_t))
+
+#define DBDMA_CHANNEL_SHIFT 7
+#define DBDMA_CHANNEL_SIZE (1 << DBDMA_CHANNEL_SHIFT)
+
+#define DBDMA_CHANNELS (0x1000 >> DBDMA_CHANNEL_SHIFT)
+
+/* Bits in control and status registers */
+
+#define RUN 0x8000
+#define PAUSE 0x4000
+#define FLUSH 0x2000
+#define WAKE 0x1000
+#define DEAD 0x0800
+#define ACTIVE 0x0400
+#define BT 0x0100
+#define DEVSTAT 0x00ff
+
+/*
+ * DBDMA command structure. These fields are all little-endian!
+ */
+
+typedef struct dbdma_cmd {
+ uint16_t req_count; /* requested byte transfer count */
+ uint16_t command; /* command word (has bit-fields) */
+ uint32_t phy_addr; /* physical data address */
+ uint32_t cmd_dep; /* command-dependent field */
+ uint16_t res_count; /* residual count after completion */
+ uint16_t xfer_status; /* transfer status */
+} dbdma_cmd;
+
+/* DBDMA command values in command field */
+
+#define COMMAND_MASK 0xf000
+#define OUTPUT_MORE 0x0000 /* transfer memory data to stream */
+#define OUTPUT_LAST 0x1000 /* ditto followed by end marker */
+#define INPUT_MORE 0x2000 /* transfer stream data to memory */
+#define INPUT_LAST 0x3000 /* ditto, expect end marker */
+#define STORE_WORD 0x4000 /* write word (4 bytes) to device reg */
+#define LOAD_WORD 0x5000 /* read word (4 bytes) from device reg */
+#define DBDMA_NOP 0x6000 /* do nothing */
+#define DBDMA_STOP 0x7000 /* suspend processing */
+
+/* Key values in command field */
+
+#define KEY_MASK 0x0700
+#define KEY_STREAM0 0x0000 /* usual data stream */
+#define KEY_STREAM1 0x0100 /* control/status stream */
+#define KEY_STREAM2 0x0200 /* device-dependent stream */
+#define KEY_STREAM3 0x0300 /* device-dependent stream */
+#define KEY_STREAM4 0x0400 /* reserved */
+#define KEY_REGS 0x0500 /* device register space */
+#define KEY_SYSTEM 0x0600 /* system memory-mapped space */
+#define KEY_DEVICE 0x0700 /* device memory-mapped space */
+
+/* Interrupt control values in command field */
+
+#define INTR_MASK 0x0030
+#define INTR_NEVER 0x0000 /* don't interrupt */
+#define INTR_IFSET 0x0010 /* intr if condition bit is 1 */
+#define INTR_IFCLR 0x0020 /* intr if condition bit is 0 */
+#define INTR_ALWAYS 0x0030 /* always interrupt */
+
+/* Branch control values in command field */
+
+#define BR_MASK 0x000c
+#define BR_NEVER 0x0000 /* don't branch */
+#define BR_IFSET 0x0004 /* branch if condition bit is 1 */
+#define BR_IFCLR 0x0008 /* branch if condition bit is 0 */
+#define BR_ALWAYS 0x000c /* always branch */
+
+/* Wait control values in command field */
+
+#define WAIT_MASK 0x0003
+#define WAIT_NEVER 0x0000 /* don't wait */
+#define WAIT_IFSET 0x0001 /* wait if condition bit is 1 */
+#define WAIT_IFCLR 0x0002 /* wait if condition bit is 0 */
+#define WAIT_ALWAYS 0x0003 /* always wait */
+
+typedef struct DBDMA_channel {
+ int channel;
+ uint32_t regs[DBDMA_REGS];
+ qemu_irq irq;
+ DBDMA_io io;
+ DBDMA_rw rw;
+ DBDMA_flush flush;
+ dbdma_cmd current;
+} DBDMA_channel;
+
+typedef struct {
+ MemoryRegion mem;
+ DBDMA_channel channels[DBDMA_CHANNELS];
+ QEMUBH *bh;
+} DBDMAState;
+
+/* Externally callable functions */
void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
DBDMA_rw rw, DBDMA_flush flush,
void *opaque);
+void DBDMA_kick(DBDMAState *dbdma);
void* DBDMA_init (MemoryRegion **dbdma_mem);
#endif
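
Since the command word packs several independent bit-fields, a decoding sketch may help; le16_to_cpu() is assumed from "qemu/bswap.h" and the function itself is illustrative:

static void demo_decode_dbdma_cmd(const dbdma_cmd *cmd)
{
    uint16_t c = le16_to_cpu(cmd->command);   /* all fields little-endian */

    uint16_t op    = c & COMMAND_MASK;   /* OUTPUT_MORE ... DBDMA_STOP */
    uint16_t key   = c & KEY_MASK;       /* KEY_STREAM0 ... KEY_DEVICE */
    uint16_t intr  = c & INTR_MASK;      /* when to raise an interrupt */
    uint16_t br    = c & BR_MASK;        /* when to branch */
    uint16_t wcond = c & WAIT_MASK;      /* when to wait */

    (void)op; (void)key; (void)intr; (void)br; (void)wcond;
}
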
diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h
index dfcad259b2..132ab97b58 100644
--- a/include/hw/ppc/ppc.h
+++ b/include/hw/ppc/ppc.h
@@ -73,8 +73,6 @@ void ppc6xx_irq_init (CPUPPCState *env);
void ppc970_irq_init (CPUPPCState *env);
void ppcPOWER7_irq_init (CPUPPCState *env);
-void ppce500_set_mpic_proxy(bool enabled);
-
/* PPC machines for OpenBIOS */
enum {
ARCH_PREP = 0,
diff --git a/include/hw/ppc/ppc_e500.h b/include/hw/ppc/ppc_e500.h
new file mode 100644
index 0000000000..b66c0e3ee0
--- /dev/null
+++ b/include/hw/ppc/ppc_e500.h
@@ -0,0 +1,6 @@
+#ifndef HW_PPC_E500_H
+#define HW_PPC_E500_H
+
+void ppce500_set_mpic_proxy(bool enabled);
+
+#endif
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 09c4570982..de95480734 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -348,7 +348,8 @@ typedef struct sPAPRTCETable sPAPRTCETable;
void spapr_iommu_init(void);
void spapr_events_init(sPAPREnvironment *spapr);
void spapr_events_fdt_skel(void *fdt, uint32_t epow_irq);
-sPAPRTCETable *spapr_tce_new_table(uint32_t liobn, size_t window_size);
+sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
+ size_t window_size);
MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet);
void spapr_tce_free(sPAPRTCETable *tcet);
void spapr_tce_reset(sPAPRTCETable *tcet);
diff --git a/include/hw/virtio/dataplane/hostmem.h b/include/hw/virtio/dataplane/hostmem.h
index b2cf09333f..2810f4b44e 100644
--- a/include/hw/virtio/dataplane/hostmem.h
+++ b/include/hw/virtio/dataplane/hostmem.h
@@ -18,6 +18,7 @@
#include "qemu/thread.h"
typedef struct {
+ MemoryRegion *mr;
void *host_addr;
hwaddr guest_addr;
uint64_t size;
diff --git a/include/migration/migration.h b/include/migration/migration.h
index f0640e0eec..bc9fde0b2a 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -125,6 +125,8 @@ void migrate_del_blocker(Error *reason);
bool migrate_rdma_pin_all(void);
+bool migrate_auto_converge(void);
+
int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
uint8_t *dst, int dlen);
int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen);
diff --git a/include/qemu-common.h b/include/qemu-common.h
index f4397388f5..6948bb9177 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -293,6 +293,7 @@ struct qemu_work_item {
void (*func)(void *data);
void *data;
int done;
+ bool free;
};
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 10becb6101..0aa8913301 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -1,68 +1,202 @@
-#ifndef __QEMU_BARRIER_H
-#define __QEMU_BARRIER_H 1
+/*
+ * Simple interface for atomic operations.
+ *
+ * Copyright (C) 2013 Red Hat, Inc.
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
-/* Compiler barrier */
-#define barrier() asm volatile("" ::: "memory")
+#ifndef __QEMU_ATOMIC_H
+#define __QEMU_ATOMIC_H 1
-#if defined(__i386__)
+#include "qemu/compiler.h"
-#include "qemu/compiler.h" /* QEMU_GNUC_PREREQ */
+/* For C11 atomic ops */
-/*
- * Because of the strongly ordered x86 storage model, wmb() and rmb() are nops
- * on x86(well, a compiler barrier only). Well, at least as long as
- * qemu doesn't do accesses to write-combining memory or non-temporal
- * load/stores from C code.
- */
-#define smp_wmb() barrier()
-#define smp_rmb() barrier()
+/* Compiler barrier */
+#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })
+
+#ifndef __ATOMIC_RELAXED
/*
- * We use GCC builtin if it's available, as that can use
- * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
- * However, on i386, there seem to be known bugs as recently as 4.3.
- * */
-#if QEMU_GNUC_PREREQ(4, 4)
-#define smp_mb() __sync_synchronize()
+ * We use GCC builtin if it's available, as that can use mfence on
+ * 32-bit as well, e.g. if built with -march=pentium-m. However, on
+ * i386 the spec is buggy, and the implementation followed it until
+ * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
+ */
+#if defined(__i386__) || defined(__x86_64__)
+#if !QEMU_GNUC_PREREQ(4, 4)
+#if defined __x86_64__
+#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
-#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
+#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
+#endif
+#endif
+#endif
+
+
+#ifdef __alpha__
+#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#endif
-#elif defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
+/*
+ * Because of the strongly ordered storage model, wmb() and rmb() are nops
+ * here (a compiler barrier only). QEMU doesn't do accesses to write-combining
+ * memory or non-temporal load/stores from C code.
+ */
#define smp_wmb() barrier()
#define smp_rmb() barrier()
-#define smp_mb() asm volatile("mfence" ::: "memory")
+
+/*
+ * __sync_lock_test_and_set() is documented to be an acquire barrier only,
+ * but it is a full barrier at the hardware level. Add a compiler barrier
+ * to make it a full barrier also at the compiler level.
+ */
+#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
+
+/*
+ * Load/store with Java volatile semantics.
+ */
+#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))
#elif defined(_ARCH_PPC)
/*
* We use an eieio() for wmb() on powerpc. This assumes we don't
* need to order cacheable and non-cacheable stores with respect to
- * each other
+ * each other.
+ *
+ * smp_mb has the same problem as on x86 for not-very-new GCC
+ * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
*/
-#define smp_wmb() asm volatile("eieio" ::: "memory")
-
+#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
-#define smp_rmb() asm volatile("lwsync" ::: "memory")
+#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
-#define smp_rmb() asm volatile("sync" ::: "memory")
+#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
+#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })
-#define smp_mb() asm volatile("sync" ::: "memory")
+#endif /* _ARCH_PPC */
-#else
+#endif /* C11 atomics */
/*
* For (host) platforms we don't have explicit barrier definitions
* for, we use the gcc __sync_synchronize() primitive to generate a
* full barrier. This should be safe on all platforms, though it may
- * be overkill for wmb() and rmb().
+ * be overkill for smp_wmb() and smp_rmb().
*/
+#ifndef smp_mb
+#define smp_mb() __sync_synchronize()
+#endif
+
+#ifndef smp_wmb
+#ifdef __ATOMIC_RELEASE
+#define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE)
+#else
#define smp_wmb() __sync_synchronize()
-#define smp_mb() __sync_synchronize()
+#endif
+#endif
+
+#ifndef smp_rmb
+#ifdef __ATOMIC_ACQUIRE
+#define smp_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)
+#else
#define smp_rmb() __sync_synchronize()
+#endif
+#endif
+
+#ifndef smp_read_barrier_depends
+#ifdef __ATOMIC_CONSUME
+#define smp_read_barrier_depends() __atomic_thread_fence(__ATOMIC_CONSUME)
+#else
+#define smp_read_barrier_depends() barrier()
+#endif
+#endif
+#ifndef atomic_read
+#define atomic_read(ptr) (*(__typeof__(*ptr) *volatile) (ptr))
#endif
+#ifndef atomic_set
+#define atomic_set(ptr, i) ((*(__typeof__(*ptr) *volatile) (ptr)) = (i))
+#endif
+
+/* These have the same semantics as Java volatile variables.
+ * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
+ * "1. Issue a StoreStore barrier (wmb) before each volatile store."
+ * 2. Issue a StoreLoad barrier after each volatile store.
+ * Note that you could instead issue one before each volatile load, but
+ * this would be slower for typical programs using volatiles in which
+ * reads greatly outnumber writes. Alternatively, if available, you
+ * can implement volatile store as an atomic instruction (for example
+ * XCHG on x86) and omit the barrier. This may be more efficient if
+ * atomic instructions are cheaper than StoreLoad barriers.
+ * 3. Issue LoadLoad and LoadStore barriers after each volatile load."
+ *
+ * If you prefer to think in terms of "pairing" of memory barriers,
+ * an atomic_mb_read pairs with an atomic_mb_set.
+ *
+ * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
+ * while an atomic_mb_set is a st.rel followed by a memory barrier.
+ *
+ * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
+ * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
+ * Just always use the barriers manually by the rules above.
+ */
+#ifndef atomic_mb_read
+#define atomic_mb_read(ptr) ({ \
+ typeof(*ptr) _val = atomic_read(ptr); \
+ smp_rmb(); \
+ _val; \
+})
+#endif
+
+#ifndef atomic_mb_set
+#define atomic_mb_set(ptr, i) do { \
+ smp_wmb(); \
+ atomic_set(ptr, i); \
+ smp_mb(); \
+} while (0)
+#endif
+
+#ifndef atomic_xchg
+#ifdef __ATOMIC_SEQ_CST
+#define atomic_xchg(ptr, i) ({ \
+ typeof(*ptr) _new = (i), _old; \
+ __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
+ _old; \
+})
+#elif defined __clang__
+#define atomic_xchg(ptr, i) __sync_exchange(ptr, i)
+#else
+/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
+#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
+#endif
+#endif
+
+/* Provide shorter names for GCC atomic builtins. */
+#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
+#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
+#define atomic_fetch_add __sync_fetch_and_add
+#define atomic_fetch_sub __sync_fetch_and_sub
+#define atomic_fetch_and __sync_fetch_and_and
+#define atomic_fetch_or __sync_fetch_and_or
+#define atomic_cmpxchg __sync_val_compare_and_swap
+
+/* And even shorter names that return void. */
+#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
+#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
+#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
+#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
+#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
+#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
+
#endif
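
A concrete pairing of the two Java-volatile helpers, as described in the comment above (the producer/consumer functions and the two variables are hypothetical):

static int demo_payload;
static int demo_ready;

/* Producer: publish the payload, then the flag with full-barrier semantics. */
static void demo_produce(void)
{
    demo_payload = 42;
    atomic_mb_set(&demo_ready, 1);
}

/* Consumer: atomic_mb_read() of the flag pairs with the atomic_mb_set()
 * above, so a non-zero flag guarantees the payload store is visible. */
static int demo_consume(void)
{
    return atomic_mb_read(&demo_ready) ? demo_payload : -1;
}
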
diff --git a/include/qemu/error-report.h b/include/qemu/error-report.h
index 14c1719ad2..3b098a9173 100644
--- a/include/qemu/error-report.h
+++ b/include/qemu/error-report.h
@@ -14,6 +14,7 @@
#define QEMU_ERROR_H
#include <stdarg.h>
+#include <stdbool.h>
#include "qemu/compiler.h"
typedef struct Location {
@@ -40,5 +41,6 @@ void error_print_loc(void);
void error_set_progname(const char *argv0);
void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
const char *error_get_progname(void);
+extern bool enable_timestamp_msg;
#endif
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index bfe7678a04..9ed47aafd3 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -1,6 +1,10 @@
#ifndef INT128_H
#define INT128_H
+#include <assert.h>
+#include <stdint.h>
+#include <stdbool.h>
+
typedef struct Int128 Int128;
struct Int128 {
@@ -55,21 +59,26 @@ static inline Int128 int128_rshift(Int128 a, int n)
static inline Int128 int128_add(Int128 a, Int128 b)
{
- Int128 r = { a.lo + b.lo, a.hi + b.hi };
- r.hi += (r.lo < a.lo) || (r.lo < b.lo);
- return r;
+ uint64_t lo = a.lo + b.lo;
+
+ /* a.lo <= a.lo + b.lo < a.lo + k (k is the base, 2^64). Hence,
+ * a.lo + b.lo >= k implies 0 <= lo = a.lo + b.lo - k < a.lo.
+ * Similarly, a.lo + b.lo < k implies a.lo <= lo = a.lo + b.lo < k.
+ *
+ * So the carry is lo < a.lo.
+ */
+ return (Int128) { lo, (uint64_t)a.hi + b.hi + (lo < a.lo) };
}
static inline Int128 int128_neg(Int128 a)
{
- a.lo = ~a.lo;
- a.hi = ~a.hi;
- return int128_add(a, int128_one());
+ uint64_t lo = -a.lo;
+ return (Int128) { lo, ~(uint64_t)a.hi + !lo };
}
static inline Int128 int128_sub(Int128 a, Int128 b)
{
- return int128_add(a, int128_neg(b));
+ return (Int128){ a.lo - b.lo, a.hi - b.hi - (a.lo < b.lo) };
}
static inline bool int128_nonneg(Int128 a)
@@ -89,7 +98,7 @@ static inline bool int128_ne(Int128 a, Int128 b)
static inline bool int128_ge(Int128 a, Int128 b)
{
- return int128_nonneg(int128_sub(a, b));
+ return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo);
}
static inline bool int128_lt(Int128 a, Int128 b)
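
A worked instance of the carry rule used in int128_add() (illustrative only; int128_make64() and int128_one() are the existing helpers from this header):

static Int128 demo_int128_carry(void)
{
    /* a.lo = 2^64 - 1, b.lo = 1: lo wraps to 0, and 0 < a.lo, so one
     * carry propagates into hi.  The result is { .lo = 0, .hi = 1 },
     * i.e. exactly 2^64. */
    return int128_add(int128_make64(UINT64_MAX), int128_one());
}
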
diff --git a/include/qemu/log.h b/include/qemu/log.h
index a9cf2146c5..d5154246e6 100644
--- a/include/qemu/log.h
+++ b/include/qemu/log.h
@@ -5,6 +5,7 @@
#include <stdbool.h>
#include <stdio.h>
#include "qemu/compiler.h"
+#include "qom/cpu.h"
#ifdef NEED_CPU_H
#include "disas/disas.h"
#endif
@@ -70,22 +71,37 @@ void GCC_FMT_ATTR(2, 3) qemu_log_mask(int mask, const char *fmt, ...);
/* Special cases: */
-#ifdef NEED_CPU_H
/* cpu_dump_state() logging functions: */
-static inline void log_cpu_state(CPUArchState *env1, int flags)
+/**
+ * log_cpu_state:
+ * @cpu: The CPU whose state is to be logged.
+ * @flags: Flags that select what to log.
+ *
+ * Logs the output of cpu_dump_state().
+ */
+static inline void log_cpu_state(CPUState *cpu, int flags)
{
if (qemu_log_enabled()) {
- cpu_dump_state(ENV_GET_CPU(env1), qemu_logfile, fprintf, flags);
+ cpu_dump_state(cpu, qemu_logfile, fprintf, flags);
}
}
-static inline void log_cpu_state_mask(int mask, CPUArchState *env1, int flags)
+/**
+ * log_cpu_state_mask:
+ * @mask: Log mask that must be enabled for the state to be logged.
+ * @cpu: The CPU whose state is to be logged.
+ * @flags: Flags that select what to log.
+ *
+ * Logs the output of cpu_dump_state() if loglevel includes @mask.
+ */
+static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
{
if (qemu_loglevel & mask) {
- log_cpu_state(env1, flags);
+ log_cpu_state(cpu, flags);
}
}
+#ifdef NEED_CPU_H
/* disas() and target_disas() to qemu_logfile: */
static inline void log_target_disas(CPUArchState *env, target_ulong start,
target_ulong len, int flags)
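
For example, a reset path could combine the new CPUState-based helper with the reset_dump_flags field added in qom/cpu.h below (a sketch; CPU_LOG_RESET and CPU_GET_CLASS() are the existing log mask and QOM macro, the function name is hypothetical):

static void demo_log_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Only logs when the cpu_reset log item is enabled. */
    log_cpu_state_mask(CPU_LOG_RESET, cpu, cc->reset_dump_flags);
}
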
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 7cb5e54cf2..dfd81a1d2f 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -24,6 +24,7 @@
#include "hw/qdev-core.h"
#include "exec/hwaddr.h"
#include "qemu/thread.h"
+#include "qemu/tls.h"
#include "qemu/typedefs.h"
typedef int (*WriteCoreDumpFunction)(void *buf, size_t size, void *opaque);
@@ -52,6 +53,7 @@ typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
* @class_by_name: Callback to map -cpu command line model name to an
* instantiatable CPU type.
* @reset: Callback to reset the #CPUState to its initial state.
+ * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
* @do_interrupt: Callback for interrupt handling.
* @do_unassigned_access: Callback for unassigned access handling.
* @dump_state: Callback for dumping state.
@@ -71,6 +73,7 @@ typedef struct CPUClass {
ObjectClass *(*class_by_name)(const char *cpu_model);
void (*reset)(CPUState *cpu);
+ int reset_dump_flags;
void (*do_interrupt)(CPUState *cpu);
CPUUnassignedAccess do_unassigned_access;
void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
@@ -113,6 +116,7 @@ struct kvm_run;
* CPU and return to its top level loop.
* @env_ptr: Pointer to subclass-specific CPUArchState field.
* @current_tb: Currently executing TB.
+ * @next_cpu: Next CPU sharing TB cache.
* @kvm_fd: vCPU file descriptor for KVM.
*
* State of one CPU core or thread.
@@ -145,6 +149,7 @@ struct CPUState {
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
+ CPUState *next_cpu;
int kvm_fd;
bool kvm_vcpu_dirty;
@@ -156,6 +161,11 @@ struct CPUState {
uint32_t halted; /* used by alpha, cris, ppc TCG */
};
+extern CPUState *first_cpu;
+
+DECLARE_TLS(CPUState *, current_cpu);
+#define current_cpu tls_var(current_cpu)
+
/**
* cpu_paging_enabled:
* @cpu: The CPU whose state is to be inspected.
@@ -369,6 +379,16 @@ bool cpu_is_stopped(CPUState *cpu);
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
/**
+ * async_run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu asynchronously.
+ */
+void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+
+/**
* qemu_for_each_cpu:
* @func: The function to be executed.
* @data: Data to pass to the function.
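
Unlike run_on_cpu(), the asynchronous variant returns immediately and the work item is freed after it runs (see the qemu_work_item.free flag added in qemu-common.h above). A usage sketch with a hypothetical worker:

static void demo_cpu_work(void *data)
{
    /* executes later, in the context of the target vCPU thread */
}

static void demo_kick(CPUState *cpu)
{
    async_run_on_cpu(cpu, demo_cpu_work, NULL);   /* does not wait */
}
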
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index 031d1f5fb3..00f21f3da2 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -29,6 +29,7 @@ struct QEMUSGList {
int nsg;
int nalloc;
size_t size;
+ DeviceState *dev;
AddressSpace *as;
};
@@ -189,7 +190,8 @@ struct ScatterGatherEntry {
dma_addr_t len;
};
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, AddressSpace *as);
+void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
+ AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif
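
A sketch of the new qemu_sglist_init() signature in use; passing the owning device lets the list keep a reference on it for the duration of the DMA (the demo_* function is hypothetical):

static void demo_dma_once(DeviceState *dev, AddressSpace *as,
                          dma_addr_t base, dma_addr_t len)
{
    QEMUSGList qsg;

    qemu_sglist_init(&qsg, dev, 1, as);   /* dev is recorded as the owner */
    qemu_sglist_add(&qsg, base, len);
    /* ... submit the transfer, e.g. via dma_buf_read()/dma_buf_write() ... */
    qemu_sglist_destroy(&qsg);
}
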
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index a14cfe949e..1e08a85116 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -169,11 +169,11 @@ void *kvm_arch_ram_alloc(ram_addr_t size);
void kvm_setup_guest_memory(void *start, size_t size);
void kvm_flush_coalesced_mmio_buffer(void);
-int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUArchState *env, target_ulong addr,
target_ulong len, int type);
-int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUArchState *env, target_ulong addr,
target_ulong len, int type);
-void kvm_remove_all_breakpoints(CPUArchState *current_env);
+void kvm_remove_all_breakpoints(CPUState *cpu);
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
#ifndef _WIN32
int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset);
@@ -252,9 +252,9 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
int kvm_sw_breakpoints_active(CPUState *cpu);
-int kvm_arch_insert_sw_breakpoint(CPUState *current_cpu,
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
-int kvm_arch_remove_sw_breakpoint(CPUState *current_cpu,
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
target_ulong len, int type);
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 2fb71afa25..3caeb66eb2 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -35,8 +35,8 @@ void vm_state_notify(int running, RunState state);
#define VMRESET_REPORT true
void vm_start(void);
-void vm_stop(RunState state);
-void vm_stop_force_state(RunState state);
+int vm_stop(RunState state);
+int vm_stop_force_state(RunState state);
typedef enum WakeupReason {
QEMU_WAKEUP_REASON_OTHER = 0,
@@ -185,6 +185,8 @@ char *get_boot_devices_list(size_t *size);
DeviceState *get_boot_device(uint32_t position);
+QemuOpts *qemu_get_machine_opts(void);
+
bool usb_enabled(bool default_usb);
extern QemuOptsList qemu_drive_opts;
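
Finally, a sketch of a caller reacting to the new int return of vm_stop(); the message and policy are assumptions, error_report() is declared in qemu/error-report.h above:

static int demo_pause_vm(void)
{
    int ret = vm_stop(RUN_STATE_PAUSED);

    if (ret < 0) {
        /* vm_stop() now propagates bdrv_flush_all() failures. */
        error_report("pausing the VM failed (%d)", ret);
    }
    return ret;
}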