author    Peter Maydell <peter.maydell@linaro.org>  2016-04-05 11:03:18 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2016-04-05 11:03:18 +0100
commit    cc621a9838cb045a370717a764907a6d6c47fa76 (patch)
tree      b793ec320c34e4b9baccffdff724d14f916f797e
parent    972e3ca3c1272419578627a107bdbc19a066c6ee (diff)
parent    2354bebaa408b97adf008d3e07f439300b0a5c06 (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* FreeBSD build fixes (atomics, qapi/error.h)
* x86 KVM fixes (SynIC, KVM_GET/SET_MSRS)
* Memory API doc fix
* checkpatch fix
* Chardev and socket fixes
* NBD fixes
* exec.c SEGV fix

# gpg: Signature made Tue 05 Apr 2016 10:47:49 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream:
  net: fix missing include of qapi/error.h in netmap.c
  nbd: Fix poor debug message
  include/qemu/atomic: add compile time asserts
  cpus: don't use atomic_read for vm_clock_warp_start
  nbd: don't request FUA on FLUSH
  doc/memory: update MMIO section
  char: ensure all clients are in non-blocking mode
  char: fix broken EAGAIN retry on OS-X due to errno clobbering
  util: retry getaddrinfo if getting EAI_BADFLAGS with AI_V4MAPPED
  checkpatch: add target_ulong to typelist
  target-i386: assert that KVM_GET/SET_MSRS can set all requested MSRs
  target-i386: do not pass MSR_TSC_AUX to KVM ioctls if CPUID bit is not set
  memory: fix segv on qemu_ram_free(block=0x0)
  target-i386/kvm: Hyper-V VMBus hypercalls blank handlers
  update Linux headers to 4.6

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  block/nbd-client.c | 4
-rw-r--r--  cpus.c | 10
-rw-r--r--  docs/memory.txt | 4
-rw-r--r--  exec.c | 4
-rw-r--r--  include/qemu/atomic.h | 58
-rw-r--r--  include/standard-headers/asm-x86/hyperv.h | 4
-rw-r--r--  include/standard-headers/linux/input.h | 1
-rw-r--r--  include/standard-headers/linux/types.h | 5
-rw-r--r--  include/standard-headers/linux/virtio_balloon.h | 2
-rw-r--r--  include/standard-headers/linux/virtio_blk.h | 6
-rw-r--r--  linux-headers/asm-arm64/kvm.h | 6
-rw-r--r--  linux-headers/asm-powerpc/epapr_hcalls.h | 4
-rw-r--r--  linux-headers/asm-powerpc/kvm.h | 9
-rw-r--r--  linux-headers/asm-x86/unistd_32.h | 2
-rw-r--r--  linux-headers/asm-x86/unistd_64.h | 2
-rw-r--r--  linux-headers/linux/kvm.h | 11
-rw-r--r--  linux-headers/linux/userfaultfd.h | 2
-rw-r--r--  linux-headers/linux/vfio.h | 92
-rw-r--r--  linux-headers/linux/vhost.h | 6
-rw-r--r--  nbd/client.c | 2
-rw-r--r--  qemu-char.c | 39
-rwxr-xr-x  scripts/checkpatch.pl | 1
-rw-r--r--  target-i386/hyperv.c | 12
-rw-r--r--  target-i386/kvm.c | 37
-rw-r--r--  tests/test-io-channel-socket.c | 5
-rw-r--r--  util/qemu-sockets.c | 19
26 files changed, 275 insertions, 72 deletions
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 021a88b050..878e879ace 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -319,10 +319,6 @@ int nbd_client_co_flush(BlockDriverState *bs)
return 0;
}
- if (client->nbdflags & NBD_FLAG_SEND_FUA) {
- request.type |= NBD_CMD_FLAG_FUA;
- }
-
request.from = 0;
request.len = 0;
diff --git a/cpus.c b/cpus.c
index 8ae477728d..cbeb1f6139 100644
--- a/cpus.c
+++ b/cpus.c
@@ -338,10 +338,18 @@ static int64_t qemu_icount_round(int64_t count)
static void icount_warp_rt(void)
{
+ unsigned seq;
+ int64_t warp_start;
+
/* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
* changes from -1 to another value, so the race here is okay.
*/
- if (atomic_read(&vm_clock_warp_start) == -1) {
+ do {
+ seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+ warp_start = vm_clock_warp_start;
+ } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
+
+ if (warp_start == -1) {
return;
}
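The cpus.c hunk above replaces a plain atomic_read() of the 64-bit vm_clock_warp_start with a read under timers_state.vm_clock_seqlock, because a relaxed atomic load of a 64-bit field is not a single atomic access on 32-bit hosts. What follows is a stand-alone sketch of the general seqlock read/write protocol; the names are illustrative and this is not QEMU's API from include/qemu/seqlock.h.

/* Illustrative seqlock sketch; memory-ordering fences are omitted for brevity. */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    atomic_uint sequence;   /* odd while a write is in progress */
    int64_t     value;      /* the protected 64-bit field */
} demo_seqlock_t;

/* Single writer: bump the sequence around the update. */
static void demo_seqlock_write(demo_seqlock_t *sl, int64_t new_value)
{
    atomic_fetch_add(&sl->sequence, 1);   /* now odd: a write is in flight */
    sl->value = new_value;
    atomic_fetch_add(&sl->sequence, 1);   /* even again: update complete */
}

/* Readers retry until they observe an unchanged, even sequence number,
 * which guarantees the 64-bit value was not torn by a concurrent write. */
static int64_t demo_seqlock_read(demo_seqlock_t *sl)
{
    unsigned seq;
    int64_t v;

    do {
        do {
            seq = atomic_load(&sl->sequence);
        } while (seq & 1);                /* writer active, spin */
        v = sl->value;
    } while (atomic_load(&sl->sequence) != seq);

    return v;
}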
diff --git a/docs/memory.txt b/docs/memory.txt
index 97134e14c7..f9272ca969 100644
--- a/docs/memory.txt
+++ b/docs/memory.txt
@@ -37,8 +37,8 @@ MemoryRegion):
- MMIO: a range of guest memory that is implemented by host callbacks;
each read or write causes a callback to be called on the host.
- You initialize these with memory_region_io(), passing it a MemoryRegionOps
- structure describing the callbacks.
+ You initialize these with memory_region_init_io(), passing it a
+ MemoryRegionOps structure describing the callbacks.
- ROM: a ROM memory region works like RAM for reads (directly accessing
a region of host memory), but like MMIO for writes (invoking a callback).
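The documentation fix above corrects the function name to memory_region_init_io(). As context, here is a hedged sketch of how a device typically registers an MMIO region with that call; MyDevState and the mydev_* names are hypothetical, only the memory_region_init_io()/MemoryRegionOps API is QEMU's.

#include "qemu/osdep.h"
#include "exec/memory.h"

typedef struct MyDevState {
    MemoryRegion iomem;
    uint32_t regs[0x1000 / 4];
} MyDevState;

static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
{
    MyDevState *s = opaque;
    return s->regs[addr >> 2];          /* host callback for every guest read */
}

static void mydev_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    MyDevState *s = opaque;
    s->regs[addr >> 2] = val;           /* host callback for every guest write */
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .write = mydev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void mydev_init_mmio(MyDevState *s, Object *owner)
{
    /* 0x1000 bytes of MMIO whose accesses invoke the callbacks above */
    memory_region_init_io(&s->iomem, owner, &mydev_ops, s, "mydev-mmio", 0x1000);
}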
diff --git a/exec.c b/exec.c
index f46e596818..c4f9036184 100644
--- a/exec.c
+++ b/exec.c
@@ -1773,6 +1773,10 @@ static void reclaim_ramblock(RAMBlock *block)
void qemu_ram_free(RAMBlock *block)
{
+ if (!block) {
+ return;
+ }
+
qemu_mutex_lock_ramlist();
QLIST_REMOVE_RCU(block, next);
ram_list.mru_block = NULL;
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 8f1d8d927d..5bc4d6cc47 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -42,30 +42,34 @@
* loads/stores past the atomic operation load/store. However there is
* no explicit memory barrier for the processor.
*/
-#define atomic_read(ptr) \
- ({ \
- typeof(*ptr) _val; \
- __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
- _val; \
+#define atomic_read(ptr) \
+ ({ \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
+ _val; \
})
-#define atomic_set(ptr, i) do { \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+#define atomic_set(ptr, i) do { \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
} while(0)
/* Atomic RCU operations imply weak memory barriers */
-#define atomic_rcu_read(ptr) \
- ({ \
- typeof(*ptr) _val; \
- __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
- _val; \
+#define atomic_rcu_read(ptr) \
+ ({ \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
+ _val; \
})
-#define atomic_rcu_set(ptr, i) do { \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
+#define atomic_rcu_set(ptr, i) do { \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
} while(0)
/* atomic_mb_read/set semantics map Java volatile variables. They are
@@ -79,6 +83,7 @@
#if defined(_ARCH_PPC)
#define atomic_mb_read(ptr) \
({ \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
smp_rmb(); \
@@ -86,22 +91,25 @@
})
#define atomic_mb_set(ptr, i) do { \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
smp_wmb(); \
__atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
smp_mb(); \
} while(0)
#else
-#define atomic_mb_read(ptr) \
- ({ \
- typeof(*ptr) _val; \
- __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \
- _val; \
+#define atomic_mb_read(ptr) \
+ ({ \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \
+ _val; \
})
-#define atomic_mb_set(ptr, i) do { \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
+#define atomic_mb_set(ptr, i) do { \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
} while(0)
#endif
@@ -109,6 +117,7 @@
/* All the remaining operations are fully sequentially consistent */
#define atomic_xchg(ptr, i) ({ \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _new = (i), _old; \
__atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
_old; \
@@ -117,6 +126,7 @@
/* Returns the eventual value, failed or not */
#define atomic_cmpxchg(ptr, old, new) \
({ \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _old = (old), _new = (new); \
__atomic_compare_exchange(ptr, &_old, &_new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
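The atomic.h hunks add QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)) so that atomic accesses wider than a pointer fail at compile time instead of silently compiling into non-atomic multi-word loads and stores. Below is an illustrative sketch of how such a compile-time assertion can be built; it is not QEMU's actual definition (see include/qemu/compiler.h for that).

/* When the condition is true the unnamed bitfield gets a negative width,
 * which is a hard compile error; when it is false this expands to nothing
 * at run time. */
#define BUILD_BUG_ON_DEMO(cond) \
    ((void)sizeof(struct { int: -!!(cond); }))

/* Used the way the patch uses it: reject, at compile time, e.g. a 64-bit
 * field accessed "atomically" on a 32-bit host. */
#define demo_atomic_read(ptr)                                   \
    ({                                                          \
        BUILD_BUG_ON_DEMO(sizeof(*(ptr)) > sizeof(void *));     \
        __atomic_load_n((ptr), __ATOMIC_RELAXED);               \
    })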
diff --git a/include/standard-headers/asm-x86/hyperv.h b/include/standard-headers/asm-x86/hyperv.h
index acb119d4b6..47b38fb816 100644
--- a/include/standard-headers/asm-x86/hyperv.h
+++ b/include/standard-headers/asm-x86/hyperv.h
@@ -226,7 +226,9 @@
(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
/* Declare the various hypercall operations. */
-#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12
diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h
index 8b2c928e0d..a52b2025ba 100644
--- a/include/standard-headers/linux/input.h
+++ b/include/standard-headers/linux/input.h
@@ -243,6 +243,7 @@ struct input_mask {
#define BUS_GSC 0x1A
#define BUS_ATARI 0x1B
#define BUS_SPI 0x1C
+#define BUS_RMI 0x1D
/*
* MT_TOOL types
diff --git a/include/standard-headers/linux/types.h b/include/standard-headers/linux/types.h
index 0526c2b87c..9dbbc73e46 100644
--- a/include/standard-headers/linux/types.h
+++ b/include/standard-headers/linux/types.h
@@ -1,2 +1,3 @@
-#include <stdint.h>
-#include "qemu/compiler.h"
+/* For QEMU all types are already defined via osdep.h, so this
+ * header does not need to do anything.
+ */
diff --git a/include/standard-headers/linux/virtio_balloon.h b/include/standard-headers/linux/virtio_balloon.h
index 0df7c2efdd..9d06ccd066 100644
--- a/include/standard-headers/linux/virtio_balloon.h
+++ b/include/standard-headers/linux/virtio_balloon.h
@@ -51,7 +51,7 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */
#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
-#define VIRTIO_BALLOON_S_AVAIL 6 /* Amount of available memory in guest */
+#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */
#define VIRTIO_BALLOON_S_NR 7
/*
diff --git a/include/standard-headers/linux/virtio_blk.h b/include/standard-headers/linux/virtio_blk.h
index cd601f4069..ab16ec5fd2 100644
--- a/include/standard-headers/linux/virtio_blk.h
+++ b/include/standard-headers/linux/virtio_blk.h
@@ -43,10 +43,10 @@
#ifndef VIRTIO_BLK_NO_LEGACY
#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
-#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
+#define VIRTIO_BLK_F_FLUSH 9 /* Flush command supported */
#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
-/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
-#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
+/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
+#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
#endif /* !VIRTIO_BLK_NO_LEGACY */
#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
index a2fd4d95b3..7d82d1f9d5 100644
--- a/linux-headers/asm-arm64/kvm.h
+++ b/linux-headers/asm-arm64/kvm.h
@@ -94,6 +94,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
+#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */
struct kvm_vcpu_init {
__u32 target;
@@ -204,6 +205,11 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+/* Device Control API on vcpu fd */
+#define KVM_ARM_VCPU_PMU_V3_CTRL 0
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
diff --git a/linux-headers/asm-powerpc/epapr_hcalls.h b/linux-headers/asm-powerpc/epapr_hcalls.h
index 06f724786a..33b3f89f55 100644
--- a/linux-headers/asm-powerpc/epapr_hcalls.h
+++ b/linux-headers/asm-powerpc/epapr_hcalls.h
@@ -78,7 +78,7 @@
#define EV_SUCCESS 0
#define EV_EPERM 1 /* Operation not permitted */
#define EV_ENOENT 2 /* Entry Not Found */
-#define EV_EIO 3 /* I/O error occured */
+#define EV_EIO 3 /* I/O error occurred */
#define EV_EAGAIN 4 /* The operation had insufficient
* resources to complete and should be
* retried
@@ -89,7 +89,7 @@
#define EV_ENODEV 7 /* No such device */
#define EV_EINVAL 8 /* An argument supplied to the hcall
was out of range or invalid */
-#define EV_INTERNAL 9 /* An internal error occured */
+#define EV_INTERNAL 9 /* An internal error occurred */
#define EV_CONFIG 10 /* A configuration error was detected */
#define EV_INVALID_STATE 11 /* The object is in an invalid state */
#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
diff --git a/linux-headers/asm-powerpc/kvm.h b/linux-headers/asm-powerpc/kvm.h
index ab4d4732c4..c93cf35ce3 100644
--- a/linux-headers/asm-powerpc/kvm.h
+++ b/linux-headers/asm-powerpc/kvm.h
@@ -333,6 +333,15 @@ struct kvm_create_spapr_tce {
__u32 window_size;
};
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+ __u64 liobn;
+ __u32 page_shift;
+ __u32 flags;
+ __u64 offset; /* in pages */
+ __u64 size; /* in pages */
+};
+
/* for KVM_ALLOCATE_RMA */
struct kvm_allocate_rma {
__u64 rma_size;
diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h
index a1525426d8..abeaf40d37 100644
--- a/linux-headers/asm-x86/unistd_32.h
+++ b/linux-headers/asm-x86/unistd_32.h
@@ -375,5 +375,7 @@
#define __NR_membarrier 375
#define __NR_mlock2 376
#define __NR_copy_file_range 377
+#define __NR_preadv2 378
+#define __NR_pwritev2 379
#endif /* _ASM_X86_UNISTD_32_H */
diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h
index 4f67c5446a..73c3d1f66a 100644
--- a/linux-headers/asm-x86/unistd_64.h
+++ b/linux-headers/asm-x86/unistd_64.h
@@ -328,5 +328,7 @@
#define __NR_membarrier 324
#define __NR_mlock2 325
#define __NR_copy_file_range 326
+#define __NR_preadv2 327
+#define __NR_pwritev2 328
#endif /* _ASM_X86_UNISTD_64_H */
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index 4a56b9ea76..3bae71a874 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -157,6 +157,7 @@ struct kvm_s390_skeys {
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
+#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
union {
struct {
@@ -165,6 +166,11 @@ struct kvm_hyperv_exit {
__u64 evt_page;
__u64 msg_page;
} synic;
+ struct {
+ __u64 input;
+ __u64 result;
+ __u64 params[2];
+ } hcall;
} u;
};
@@ -856,6 +862,9 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
#define KVM_CAP_HYPERV_SYNIC 123
#define KVM_CAP_S390_RI 124
+#define KVM_CAP_SPAPR_TCE_64 125
+#define KVM_CAP_ARM_PMU_V3 126
+#define KVM_CAP_VCPU_ATTRIBUTES 127
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1148,6 +1157,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PPC_ALLOC_HTAB */
#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
+#define KVM_CREATE_SPAPR_TCE_64 _IOW(KVMIO, 0xa8, \
+ struct kvm_create_spapr_tce_64)
/* Available with KVM_CAP_RMA */
#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
/* Available with KVM_CAP_PPC_HTAB_FD */
diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h
index 9057d7af3a..19e8453249 100644
--- a/linux-headers/linux/userfaultfd.h
+++ b/linux-headers/linux/userfaultfd.h
@@ -78,7 +78,7 @@ struct uffd_msg {
__u64 reserved3;
} reserved;
} arg;
-} __packed;
+} __attribute__((packed));
/*
* Start at 0x12 and not at 0 to be more strict against bugs.
diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index 15e096c43a..759b850a3e 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -59,6 +59,33 @@
#define VFIO_TYPE (';')
#define VFIO_BASE 100
+/*
+ * For extension of INFO ioctls, VFIO makes use of a capability chain
+ * designed after PCI/e capabilities. A flag bit indicates whether
+ * this capability chain is supported and a field defined in the fixed
+ * structure defines the offset of the first capability in the chain.
+ * This field is only valid when the corresponding bit in the flags
+ * bitmap is set. This offset field is relative to the start of the
+ * INFO buffer, as is the next field within each capability header.
+ * The id within the header is a shared address space per INFO ioctl,
+ * while the version field is specific to the capability id. The
+ * contents following the header are specific to the capability id.
+ */
+struct vfio_info_cap_header {
+ __u16 id; /* Identifies capability */
+ __u16 version; /* Version specific to the capability ID */
+ __u32 next; /* Offset of next capability */
+};
+
+/*
+ * Callers of INFO ioctls passing insufficiently sized buffers will see
+ * the capability chain flag bit set, a zero value for the first capability
+ * offset (if available within the provided argsz), and argsz will be
+ * updated to report the necessary buffer size. For compatibility, the
+ * INFO ioctl will not report error in this case, but the capability chain
+ * will not be available.
+ */
+
/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */
/**
@@ -194,13 +221,73 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_FLAG_READ (1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE (1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP (1 << 2) /* Region supports mmap */
+#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
- __u32 resv; /* Reserved for alignment */
+ __u32 cap_offset; /* Offset within info struct of first cap */
__u64 size; /* Region size (bytes) */
__u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
+/*
+ * The sparse mmap capability allows finer granularity of specifying areas
+ * within a region with mmap support. When specified, the user should only
+ * mmap the offset ranges specified by the areas array. mmaps outside of the
+ * areas specified may fail (such as the range covering a PCI MSI-X table) or
+ * may result in improper device behavior.
+ *
+ * The structures below define version 1 of this capability.
+ */
+#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
+
+struct vfio_region_sparse_mmap_area {
+ __u64 offset; /* Offset of mmap'able area within region */
+ __u64 size; /* Size of mmap'able area */
+};
+
+struct vfio_region_info_cap_sparse_mmap {
+ struct vfio_info_cap_header header;
+ __u32 nr_areas;
+ __u32 reserved;
+ struct vfio_region_sparse_mmap_area areas[];
+};
+
+/*
+ * The device specific type capability allows regions unique to a specific
+ * device or class of devices to be exposed. This helps solve the problem for
+ * vfio bus drivers of defining which region indexes correspond to which region
+ * on the device, without needing to resort to static indexes, as done by
+ * vfio-pci. For instance, if we were to go back in time, we might remove
+ * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
+ * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
+ * make a "VGA" device specific type to describe the VGA access space. This
+ * means that non-VGA devices wouldn't need to waste this index, and thus the
+ * address space associated with it due to implementation of device file
+ * descriptor offsets in vfio-pci.
+ *
+ * The current implementation is now part of the user ABI, so we can't use this
+ * for VGA, but there are other upcoming use cases, such as opregions for Intel
+ * IGD devices and framebuffers for vGPU devices. We missed VGA, but we'll
+ * use this for future additions.
+ *
+ * The structure below defines version 1 of this capability.
+ */
+#define VFIO_REGION_INFO_CAP_TYPE 2
+
+struct vfio_region_info_cap_type {
+ struct vfio_info_cap_header header;
+ __u32 type; /* global per bus driver */
+ __u32 subtype; /* type specific */
+};
+
+#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31)
+#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
+
+/* 8086 Vendor sub-types */
+#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1)
+#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
+#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
+
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
@@ -336,7 +423,8 @@ enum {
* between described ranges are unimplemented.
*/
VFIO_PCI_VGA_REGION_INDEX,
- VFIO_PCI_NUM_REGIONS
+ VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
+ /* device specific cap to define content. */
};
enum {
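The vfio.h update introduces the capability chain described in the comments above: each vfio_info_cap_header carries an id, a version, and a next offset measured from the start of the INFO buffer. Here is a sketch of how userspace might walk that chain after VFIO_DEVICE_GET_REGION_INFO; it assumes the caller has re-issued the ioctl with a buffer of the size reported back in argsz, the helper name is hypothetical, and error handling is elided.

#include <linux/vfio.h>

static struct vfio_info_cap_header *
find_region_cap(struct vfio_region_info *info, __u16 id)
{
    struct vfio_info_cap_header *hdr;
    __u32 off;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS) || !info->cap_offset) {
        return NULL;                  /* no capability chain in this buffer */
    }

    /* Both cap_offset and hdr->next are offsets from the start of info. */
    for (off = info->cap_offset; off; off = hdr->next) {
        hdr = (struct vfio_info_cap_header *)((char *)info + off);
        if (hdr->id == id) {
            return hdr;
        }
    }
    return NULL;
}

/* Example: locate a region's sparse-mmap areas, if the driver reports any:
 *   struct vfio_region_info_cap_sparse_mmap *sparse =
 *       (void *)find_region_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
 */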
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index ead86db91a..571294cea0 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -126,6 +126,12 @@ struct vhost_memory {
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
/* Set eventfd to signal an error */
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+/* Set busy loop timeout (in us) */
+#define VHOST_SET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x23, \
+ struct vhost_vring_state)
+/* Get busy loop timeout (in us) */
+#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
+ struct vhost_vring_state)
/* VHOST_NET specific defines */
diff --git a/nbd/client.c b/nbd/client.c
index f89c0a16ae..d9b7a9b07e 100644
--- a/nbd/client.c
+++ b/nbd/client.c
@@ -634,7 +634,7 @@ ssize_t nbd_send_request(QIOChannel *ioc, struct nbd_request *request)
cpu_to_be64w((uint64_t*)(buf + 16), request->from);
cpu_to_be32w((uint32_t*)(buf + 24), request->len);
- TRACE("Sending request to client: "
+ TRACE("Sending request to server: "
"{ .from = %" PRIu64", .len = %u, .handle = %" PRIu64", .type=%i}",
request->from, request->len, request->handle, request->type);
diff --git a/qemu-char.c b/qemu-char.c
index 270819aec3..b597ee19ca 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -225,12 +225,12 @@ static void qemu_chr_fe_write_log(CharDriverState *s,
}
while (done < len) {
- do {
- ret = write(s->logfd, buf + done, len - done);
- if (ret == -1 && errno == EAGAIN) {
- g_usleep(100);
- }
- } while (ret == -1 && errno == EAGAIN);
+ retry:
+ ret = write(s->logfd, buf + done, len - done);
+ if (ret == -1 && errno == EAGAIN) {
+ g_usleep(100);
+ goto retry;
+ }
if (ret <= 0) {
return;
@@ -246,12 +246,12 @@ static int qemu_chr_fe_write_buffer(CharDriverState *s, const uint8_t *buf, int
qemu_mutex_lock(&s->chr_write_lock);
while (*offset < len) {
- do {
- res = s->chr_write(s, buf + *offset, len - *offset);
- if (res == -1 && errno == EAGAIN) {
- g_usleep(100);
- }
- } while (res == -1 && errno == EAGAIN);
+ retry:
+ res = s->chr_write(s, buf + *offset, len - *offset);
+ if (res < 0 && errno == EAGAIN) {
+ g_usleep(100);
+ goto retry;
+ }
if (res <= 0) {
break;
@@ -333,12 +333,12 @@ int qemu_chr_fe_read_all(CharDriverState *s, uint8_t *buf, int len)
}
while (offset < len) {
- do {
- res = s->chr_sync_read(s, buf + offset, len - offset);
- if (res == -1 && errno == EAGAIN) {
- g_usleep(100);
- }
- } while (res == -1 && errno == EAGAIN);
+ retry:
+ res = s->chr_sync_read(s, buf + offset, len - offset);
+ if (res == -1 && errno == EAGAIN) {
+ g_usleep(100);
+ goto retry;
+ }
if (res == 0) {
break;
@@ -3081,6 +3081,8 @@ static int tcp_chr_new_client(CharDriverState *chr, QIOChannelSocket *sioc)
s->sioc = sioc;
object_ref(OBJECT(sioc));
+ qio_channel_set_blocking(s->ioc, false, NULL);
+
if (s->do_nodelay) {
qio_channel_set_delay(s->ioc, false);
}
@@ -3112,7 +3114,6 @@ static int tcp_chr_add_client(CharDriverState *chr, int fd)
if (!sioc) {
return -1;
}
- qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
ret = tcp_chr_new_client(chr, sioc);
object_unref(OBJECT(sioc));
return ret;
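The qemu-char.c hunks restructure the EAGAIN retry loops because the old while() condition re-read errno after g_usleep(), and g_usleep() can clobber errno (observed on OS X), so a transient EAGAIN could abort the retry instead of retrying. The merged patch uses a goto-based retry, as shown above; the sketch below illustrates the hazard and an equivalent fix that saves errno instead. Function names are illustrative.

#include <errno.h>
#include <unistd.h>
#include <glib.h>

/* Broken: the while() condition re-reads errno *after* g_usleep(), which may
 * itself have modified it. */
static ssize_t write_retry_broken(int fd, const void *buf, size_t len)
{
    ssize_t ret;
    do {
        ret = write(fd, buf, len);
        if (ret == -1 && errno == EAGAIN) {
            g_usleep(100);
        }
    } while (ret == -1 && errno == EAGAIN);   /* errno may be stale here */
    return ret;
}

/* Alternative fix: capture errno immediately after the call that set it. */
static ssize_t write_retry_saved_errno(int fd, const void *buf, size_t len)
{
    ssize_t ret;
    int saved_errno;
    do {
        ret = write(fd, buf, len);
        saved_errno = errno;
        if (ret == -1 && saved_errno == EAGAIN) {
            g_usleep(100);
        }
    } while (ret == -1 && saved_errno == EAGAIN);
    errno = saved_errno;                      /* report the original failure */
    return ret;
}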
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index c26f76ed09..c9554ba644 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -212,6 +212,7 @@ our @typeList = (
qr{${Ident}_t},
qr{${Ident}_handler},
qr{${Ident}_handler_fn},
+ qr{target_(?:u)?long},
);
# This can be modified by sub possible. Since it can be empty, be careful
diff --git a/target-i386/hyperv.c b/target-i386/hyperv.c
index 6b519c392e..c4d6a9b2b1 100644
--- a/target-i386/hyperv.c
+++ b/target-i386/hyperv.c
@@ -44,6 +44,18 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
return -1;
}
return 0;
+ case KVM_EXIT_HYPERV_HCALL: {
+ uint16_t code;
+
+ code = exit->u.hcall.input & 0xffff;
+ switch (code) {
+ case HVCALL_POST_MESSAGE:
+ case HVCALL_SIGNAL_EVENT:
+ default:
+ exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
+ return 0;
+ }
+ }
default:
return -1;
}
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 87ab969ae1..799fdfa682 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -141,6 +141,7 @@ static int kvm_get_tsc(CPUState *cs)
return ret;
}
+ assert(ret == 1);
env->tsc = msr_data.entries[0].data;
return 0;
}
@@ -917,6 +918,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
has_msr_mtrr = true;
}
+ if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
+ has_msr_tsc_aux = false;
+ }
return 0;
}
@@ -1443,6 +1447,7 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
struct kvm_msr_entry entries[1];
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
+ int ret;
if (!has_msr_tsc_deadline) {
return 0;
@@ -1454,7 +1459,13 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
.nmsrs = 1,
};
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
}
/*
@@ -1469,6 +1480,11 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
struct kvm_msrs info;
struct kvm_msr_entry entry;
} msr_data;
+ int ret;
+
+ if (!has_msr_feature_control) {
+ return 0;
+ }
kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
cpu->env.msr_ia32_feature_control);
@@ -1477,7 +1493,13 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
.nmsrs = 1,
};
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
}
static int kvm_put_msrs(X86CPU *cpu, int level)
@@ -1489,6 +1511,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int n = 0, i;
+ int ret;
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
@@ -1682,8 +1705,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
.nmsrs = n,
};
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+ assert(ret == n);
+ return 0;
}
@@ -2052,6 +2080,7 @@ static int kvm_get_msrs(X86CPU *cpu)
return ret;
}
+ assert(ret == n);
for (i = 0; i < ret; i++) {
uint32_t index = msrs[i].index;
switch (index) {
@@ -2508,7 +2537,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
- if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
+ if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_msr_feature_control(x86_cpu);
if (ret < 0) {
return ret;
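The target-i386/kvm.c hunks above rely on KVM_SET_MSRS and KVM_GET_MSRS returning the number of MSR entries actually processed, which can be smaller than nmsrs if the kernel refuses one of them; asserting on the count turns a silent partial update of guest MSR state into an immediate failure. A condensed sketch of the pattern, assuming QEMU's kvm_vcpu_ioctl() helper; the wrapper name is hypothetical.

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include <linux/kvm.h>

static int kvm_set_msrs_checked(CPUState *cpu, struct kvm_msrs *msr_data, int n)
{
    int ret = kvm_vcpu_ioctl(cpu, KVM_SET_MSRS, msr_data);

    if (ret < 0) {
        return ret;        /* the ioctl itself failed */
    }
    assert(ret == n);      /* every requested MSR must have been written */
    return 0;
}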
diff --git a/tests/test-io-channel-socket.c b/tests/test-io-channel-socket.c
index 9d94adb9ac..855306b8dd 100644
--- a/tests/test-io-channel-socket.c
+++ b/tests/test-io-channel-socket.c
@@ -27,9 +27,6 @@
#ifndef AI_ADDRCONFIG
# define AI_ADDRCONFIG 0
#endif
-#ifndef AI_V4MAPPED
-# define AI_V4MAPPED 0
-#endif
#ifndef EAI_ADDRFAMILY
# define EAI_ADDRFAMILY 0
#endif
@@ -42,7 +39,7 @@ static int check_bind(const char *hostname, bool *has_proto)
int ret = -1;
memset(&ai, 0, sizeof(ai));
- ai.ai_flags = AI_CANONNAME | AI_V4MAPPED | AI_ADDRCONFIG;
+ ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG;
ai.ai_family = AF_UNSPEC;
ai.ai_socktype = SOCK_STREAM;
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index b87e17fa56..0d536911c9 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -29,6 +29,7 @@
#ifndef AI_ADDRCONFIG
# define AI_ADDRCONFIG 0
#endif
+
#ifndef AI_V4MAPPED
# define AI_V4MAPPED 0
#endif
@@ -354,10 +355,14 @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr,
struct addrinfo ai, *res;
int rc;
Error *err = NULL;
+ static int useV4Mapped = 1;
memset(&ai, 0, sizeof(ai));
- ai.ai_flags = AI_CANONNAME | AI_V4MAPPED | AI_ADDRCONFIG;
+ ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG;
+ if (atomic_read(&useV4Mapped)) {
+ ai.ai_flags |= AI_V4MAPPED;
+ }
ai.ai_family = inet_ai_family_from_address(saddr, &err);
ai.ai_socktype = SOCK_STREAM;
@@ -373,6 +378,18 @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr,
/* lookup */
rc = getaddrinfo(saddr->host, saddr->port, &ai, &res);
+
+ /* At least FreeBSD and OS-X 10.6 declare AI_V4MAPPED but
+ * then don't implement it in their getaddrinfo(). Detect
+ * this and retry without the flag since that's preferrable
+ * to a fatal error
+ */
+ if (rc == EAI_BADFLAGS &&
+ (ai.ai_flags & AI_V4MAPPED)) {
+ atomic_set(&useV4Mapped, 0);
+ ai.ai_flags &= ~AI_V4MAPPED;
+ rc = getaddrinfo(saddr->host, saddr->port, &ai, &res);
+ }
if (rc != 0) {
error_setg(errp, "address resolution failed for %s:%s: %s",
saddr->host, saddr->port, gai_strerror(rc));