-rw-r--r--  backends/hostmem.c           |  2
-rw-r--r--  block/block-backend.c        |  8
-rw-r--r--  block/crypto.c               |  2
-rw-r--r--  block/throttle-groups.c      |  2
-rw-r--r--  gdbstub.c                    |  2
-rw-r--r--  hw/arm/xlnx-zynqmp.c         |  6
-rw-r--r--  hw/block/nvme.c              |  5
-rw-r--r--  hw/core/machine-qmp-cmds.c   |  2
-rw-r--r--  hw/core/machine.c            |  5
-rw-r--r--  hw/display/qxl.c             |  9
-rw-r--r--  hw/mem/nvdimm.c              |  5
-rw-r--r--  hw/mem/pc-dimm.c             |  5
-rw-r--r--  hw/misc/ivshmem.c            |  5
-rw-r--r--  hw/net/net_tx_pkt.c          | 23
-rw-r--r--  hw/net/net_tx_pkt.h          | 14
-rw-r--r--  hw/net/xgmac.c               | 14
-rw-r--r--  hw/ppc/spapr_drc.c           |  3
-rw-r--r--  hw/ppc/trace-events          |  2
-rw-r--r--  hw/usb/hcd-xhci.c            |  4
-rw-r--r--  hw/virtio/virtio-crypto.c    |  5
-rw-r--r--  hw/virtio/virtio-mem.c       |  6
-rw-r--r--  hw/virtio/virtio-pmem.c      |  5
-rw-r--r--  include/net/eth.h            |  1
-rw-r--r--  include/qom/object.h         |  7
-rw-r--r--  iothread.c                   |  9
-rw-r--r--  net/net.c                    |  6
-rw-r--r--  qom/object.c                 |  7
-rw-r--r--  qom/qom-hmp-cmds.c           | 32
-rw-r--r--  scripts/qapi/visit.py        |  1
-rw-r--r--  scsi/pr-manager-helper.c     |  3
-rw-r--r--  scsi/pr-manager.c            |  2
-rw-r--r--  softmmu/memory.c             |  2
-rwxr-xr-x  tests/qemu-iotests/296       | 44
-rw-r--r--  tests/qemu-iotests/296.out   | 12
-rw-r--r--  util/module.c                |  3
35 files changed, 182 insertions(+), 81 deletions(-)
diff --git a/backends/hostmem.c b/backends/hostmem.c
index c614f1bdc1..4bde00e8e7 100644
--- a/backends/hostmem.c
+++ b/backends/hostmem.c
@@ -33,7 +33,7 @@ char *
host_memory_backend_get_name(HostMemoryBackend *backend)
{
if (!backend->use_canonical_path) {
- return object_get_canonical_path_component(OBJECT(backend));
+ return g_strdup(object_get_canonical_path_component(OBJECT(backend)));
}
return object_get_canonical_path(OBJECT(backend));
diff --git a/block/block-backend.c b/block/block-backend.c
index 0bf0188133..3a13cb5f0b 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1394,8 +1394,16 @@ typedef struct BlkAioEmAIOCB {
bool has_returned;
} BlkAioEmAIOCB;
+static AioContext *blk_aio_em_aiocb_get_aio_context(BlockAIOCB *acb_)
+{
+ BlkAioEmAIOCB *acb = container_of(acb_, BlkAioEmAIOCB, common);
+
+ return blk_get_aio_context(acb->rwco.blk);
+}
+
static const AIOCBInfo blk_aio_em_aiocb_info = {
.aiocb_size = sizeof(BlkAioEmAIOCB),
+ .get_aio_context = blk_aio_em_aiocb_get_aio_context,
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
diff --git a/block/crypto.c b/block/crypto.c
index 8725c1bc02..0807557763 100644
--- a/block/crypto.c
+++ b/block/crypto.c
@@ -881,7 +881,7 @@ block_crypto_child_perms(BlockDriverState *bs, BdrvChild *c,
* For backward compatibility, manually share the write
* and resize permission
*/
- *nshared |= (BLK_PERM_WRITE | BLK_PERM_RESIZE);
+ *nshared |= shared & (BLK_PERM_WRITE | BLK_PERM_RESIZE);
/*
* Since we are not fully a format driver, don't always request
* the read/resize permission but only when explicitly
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 03a53c89ea..98fea7fd47 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -771,7 +771,7 @@ static void throttle_group_obj_complete(UserCreatable *obj, Error **errp)
/* set group name to object id if it exists */
if (!tg->name && tg->parent_obj.parent) {
- tg->name = object_get_canonical_path_component(OBJECT(obj));
+ tg->name = g_strdup(object_get_canonical_path_component(OBJECT(obj)));
}
/* We must have a group name at this point */
assert(tg->name);
diff --git a/gdbstub.c b/gdbstub.c
index 6950fd243f..f3a318cd7f 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -2059,7 +2059,7 @@ static void handle_query_thread_extra(GdbCmdContext *gdb_ctx, void *user_ctx)
/* Print the CPU model and name in multiprocess mode */
ObjectClass *oc = object_get_class(OBJECT(cpu));
const char *cpu_model = object_class_get_name(oc);
- g_autofree char *cpu_name =
+ const char *cpu_name =
object_get_canonical_path_component(OBJECT(cpu));
g_string_printf(rs, "%s %s [%s]", cpu_model, cpu_name,
cpu->halted ? "halted " : "running");
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 5855e5d5bf..c435b9d52a 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -190,7 +190,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s,
qdev_prop_set_uint32(DEVICE(&s->rpu_cluster), "cluster-id", 1);
for (i = 0; i < num_rpus; i++) {
- char *name;
+ const char *name;
object_initialize_child(OBJECT(&s->rpu_cluster), "rpu-cpu[*]",
&s->rpu_cpu[i],
@@ -204,7 +204,6 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s,
} else {
s->boot_cpu_ptr = &s->rpu_cpu[i];
}
- g_free(name);
object_property_set_bool(OBJECT(&s->rpu_cpu[i]), "reset-hivecs", true,
&error_abort);
@@ -341,7 +340,7 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
/* Realize APUs before realizing the GIC. KVM requires this. */
for (i = 0; i < num_apus; i++) {
- char *name;
+ const char *name;
object_property_set_int(OBJECT(&s->apu_cpu[i]), "psci-conduit",
QEMU_PSCI_CONDUIT_SMC, &error_abort);
@@ -354,7 +353,6 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
} else {
s->boot_cpu_ptr = &s->apu_cpu[i];
}
- g_free(name);
object_property_set_bool(OBJECT(&s->apu_cpu[i]), "has_el3", s->secure,
NULL);
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 1aee042d4c..3426e17e65 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1397,9 +1397,8 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
if (!n->params.cmb_size_mb && n->pmrdev) {
if (host_memory_backend_is_mapped(n->pmrdev)) {
- char *path = object_get_canonical_path_component(OBJECT(n->pmrdev));
- error_setg(errp, "can't use already busy memdev: %s", path);
- g_free(path);
+ error_setg(errp, "can't use already busy memdev: %s",
+ object_get_canonical_path_component(OBJECT(n->pmrdev)));
return;
}
diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c
index 2c5da8413d..963088b798 100644
--- a/hw/core/machine-qmp-cmds.c
+++ b/hw/core/machine-qmp-cmds.c
@@ -315,7 +315,7 @@ static int query_memdev(Object *obj, void *opaque)
m->value = g_malloc0(sizeof(*m->value));
- m->value->id = object_get_canonical_path_component(obj);
+ m->value->id = g_strdup(object_get_canonical_path_component(obj));
m->value->has_id = !!m->value->id;
m->value->size = object_property_get_uint(obj, "size",
diff --git a/hw/core/machine.c b/hw/core/machine.c
index eb267b828d..2f881d6d75 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -1073,9 +1073,8 @@ MemoryRegion *machine_consume_memdev(MachineState *machine,
MemoryRegion *ret = host_memory_backend_get_memory(backend);
if (memory_region_is_mapped(ret)) {
- char *path = object_get_canonical_path_component(OBJECT(backend));
- error_report("memory backend %s can't be used multiple times.", path);
- g_free(path);
+ error_report("memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(backend)));
exit(EXIT_FAILURE);
}
host_memory_backend_set_mapped(backend, true);
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index d5627119ec..11871340e7 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -1762,7 +1762,16 @@ async_common:
qxl_set_mode(d, val, 0);
break;
case QXL_IO_LOG:
+#ifdef CONFIG_MODULES
+ /*
+ * FIXME
+ * trace_event_get_state_backends() does not work for modules;
+ * it leads to "undefined symbol: qemu_qxl_io_log_semaphore"
+ */
+ if (true) {
+#else
if (trace_event_get_state_backends(TRACE_QXL_IO_LOG) || d->guestdebug) {
+#endif
/* We cannot trust the guest to NUL terminate d->ram->log_buf */
char *log_buf = g_strndup((const char *)d->ram->log_buf,
sizeof(d->ram->log_buf));
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index d0d6e553cf..e1574bc07c 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -137,13 +137,12 @@ static void nvdimm_prepare_memory_region(NVDIMMDevice *nvdimm, Error **errp)
if (size <= nvdimm->label_size || !pmem_size) {
HostMemoryBackend *hostmem = dimm->hostmem;
- char *path = object_get_canonical_path_component(OBJECT(hostmem));
error_setg(errp, "the size of memdev %s (0x%" PRIx64 ") is too "
"small to contain nvdimm label (0x%" PRIx64 ") and "
"aligned PMEM (0x%" PRIx64 ")",
- path, memory_region_size(mr), nvdimm->label_size, align);
- g_free(path);
+ object_get_canonical_path_component(OBJECT(hostmem)),
+ memory_region_size(mr), nvdimm->label_size, align);
return;
}
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 9d3f0b9691..c30351070b 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -179,9 +179,8 @@ static void pc_dimm_realize(DeviceState *dev, Error **errp)
error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property is not set");
return;
} else if (host_memory_backend_is_mapped(dimm->hostmem)) {
- char *path = object_get_canonical_path_component(OBJECT(dimm->hostmem));
- error_setg(errp, "can't use already busy memdev: %s", path);
- g_free(path);
+ error_setg(errp, "can't use already busy memdev: %s",
+ object_get_canonical_path_component(OBJECT(dimm->hostmem)));
return;
}
if (((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) ||
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index fc128b25e2..2b6882face 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -1037,9 +1037,8 @@ static void ivshmem_plain_realize(PCIDevice *dev, Error **errp)
error_setg(errp, "You must specify a 'memdev'");
return;
} else if (host_memory_backend_is_mapped(s->hostmem)) {
- char *path = object_get_canonical_path_component(OBJECT(s->hostmem));
- error_setg(errp, "can't use already busy memdev: %s", path);
- g_free(path);
+ error_setg(errp, "can't use already busy memdev: %s",
+ object_get_canonical_path_component(OBJECT(s->hostmem)));
return;
}
diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
index 331c73cfc0..9560e4a49e 100644
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -626,6 +626,7 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
if (pkt->has_virt_hdr ||
pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
+ net_tx_pkt_fix_ip6_payload_len(pkt);
net_tx_pkt_sendv(pkt, nc, pkt->vec,
pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
return true;
@@ -644,3 +645,25 @@ bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
return res;
}
+
+void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt)
+{
+ struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
+ if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) {
+ struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr;
+ /*
+ * TODO: if QEMU ever supports >64K packets, add a jumbo option check,
+ * something like:
+ * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {'
+ */
+ if (ip6->ip6_plen == 0) {
+ if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) {
+ ip6->ip6_plen = htons(pkt->payload_len);
+ }
+ /*
+ * TODO: if QEMU ever supports >64K packets,
+ * add the jumbo option for packets greater than 65,535 bytes
+ */
+ }
+ }
+}
diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h
index 212ecc62fc..4ec8bbe9bd 100644
--- a/hw/net/net_tx_pkt.h
+++ b/hw/net/net_tx_pkt.h
@@ -187,4 +187,18 @@ bool net_tx_pkt_parse(struct NetTxPkt *pkt);
*/
bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt);
+/**
+ * Fix the IPv6 'plen' field.
+ * If the IPv6 payload length field is 0, a Hop-by-Hop jumbo option is
+ * required for payloads greater than 65,535 bytes.
+ * For payloads no greater than 65,535 bytes, fix the 'plen' field instead.
+ * For backends with a virtio header, one packet with the proper payload
+ * size is enough. For now, QEMU drops every packet larger than 64K
+ * (see net_tx_pkt_send()), so there is no need to add a jumbo option to
+ * the IPv6 Hop-by-Hop extension if it is missing.
+ *
+ * @pkt packet
+ */
+void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt);
+
#endif
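
The fix-up above only applies when the payload fits in the 16-bit 'plen'
field; a minimal standalone sketch of the same logic on a bare IPv6 header
follows (the raw_ip6 struct and fix_ip6_plen() helper are illustrative only
and are not part of this patch, which operates on struct ip6_header from
net/eth.h):

    #include <stdint.h>
    #include <stddef.h>
    #include <arpa/inet.h>

    /* Illustrative layout only, roughly matching RFC 8200. */
    struct raw_ip6 {
        uint32_t vtc_flow;   /* version, traffic class, flow label */
        uint16_t plen;       /* payload length, network byte order */
        uint8_t  nxt;        /* next header */
        uint8_t  hlim;       /* hop limit */
        uint8_t  src[16];
        uint8_t  dst[16];
    };

    #define MAX_IP6_PAYLOAD 65535

    void fix_ip6_plen(struct raw_ip6 *ip6, size_t payload_len)
    {
        /* plen == 0 normally signals a jumbogram (Hop-by-Hop jumbo
         * option); for payloads that fit in 16 bits, fill the field in. */
        if (ip6->plen == 0 && payload_len <= MAX_IP6_PAYLOAD) {
            ip6->plen = htons((uint16_t)payload_len);
        }
    }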
diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c
index 574dd47b41..5bf1b61012 100644
--- a/hw/net/xgmac.c
+++ b/hw/net/xgmac.c
@@ -220,21 +220,31 @@ static void xgmac_enet_send(XgmacState *s)
}
len = (bd.buffer1_size & 0xfff) + (bd.buffer2_size & 0xfff);
+ /*
+ * FIXME: these cases of malformed tx descriptors (bad sizes)
+ * should probably be reported back to the guest somehow
+ * rather than simply silently stopping processing, but we
+ * don't know what the hardware does in this situation.
+ * This will only happen for buggy guests anyway.
+ */
if ((bd.buffer1_size & 0xfff) > 2048) {
DEBUGF_BRK("qemu:%s:ERROR...ERROR...ERROR... -- "
"xgmac buffer 1 len on send > 2048 (0x%x)\n",
__func__, bd.buffer1_size & 0xfff);
+ break;
}
if ((bd.buffer2_size & 0xfff) != 0) {
DEBUGF_BRK("qemu:%s:ERROR...ERROR...ERROR... -- "
"xgmac buffer 2 len on send != 0 (0x%x)\n",
__func__, bd.buffer2_size & 0xfff);
+ break;
}
- if (len >= sizeof(frame)) {
+ if (frame_size + len >= sizeof(frame)) {
DEBUGF_BRK("qemu:%s: buffer overflow %d read into %zu "
- "buffer\n" , __func__, len, sizeof(frame));
+ "buffer\n" , __func__, frame_size + len, sizeof(frame));
DEBUGF_BRK("qemu:%s: buffer1.size=%d; buffer2.size=%d\n",
__func__, bd.buffer1_size, bd.buffer2_size);
+ break;
}
cpu_physical_memory_read(bd.buffer1_addr, ptr, len);
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index 43d12bc33a..fe998d8108 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -513,7 +513,7 @@ static void realize(DeviceState *d, Error **errp)
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
Object *root_container;
gchar *link_name;
- char *child_name;
+ const char *child_name;
trace_spapr_drc_realize(spapr_drc_index(drc));
/* NOTE: we do this as part of realize/unrealize due to the fact
@@ -529,7 +529,6 @@ static void realize(DeviceState *d, Error **errp)
trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name);
object_property_add_alias(root_container, link_name,
drc->owner, child_name);
- g_free(child_name);
g_free(link_name);
vmstate_register(VMSTATE_IF(drc), spapr_drc_index(drc), &vmstate_spapr_drc,
drc);
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index 9ea620f23c..7c0be4102e 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -57,7 +57,7 @@ spapr_drc_detach(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_awaiting_quiesce(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_reset(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_realize(uint32_t index) "drc: 0x%"PRIx32
-spapr_drc_realize_child(uint32_t index, char *childname) "drc: 0x%"PRIx32", child name: %s"
+spapr_drc_realize_child(uint32_t index, const char *childname) "drc: 0x%"PRIx32", child name: %s"
spapr_drc_realize_complete(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_unrealize(uint32_t index) "drc: 0x%"PRIx32
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index b330e36fe6..67a18fe2b6 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -3184,7 +3184,7 @@ static const MemoryRegionOps xhci_oper_ops = {
.read = xhci_oper_read,
.write = xhci_oper_write,
.valid.min_access_size = 4,
- .valid.max_access_size = 4,
+ .valid.max_access_size = sizeof(dma_addr_t),
.endianness = DEVICE_LITTLE_ENDIAN,
};
@@ -3200,7 +3200,7 @@ static const MemoryRegionOps xhci_runtime_ops = {
.read = xhci_runtime_read,
.write = xhci_runtime_write,
.valid.min_access_size = 4,
- .valid.max_access_size = 4,
+ .valid.max_access_size = sizeof(dma_addr_t),
.endianness = DEVICE_LITTLE_ENDIAN,
};
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index bd9165c565..6da12e315f 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -786,9 +786,8 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
error_setg(errp, "'cryptodev' parameter expects a valid object");
return;
} else if (cryptodev_backend_is_used(vcrypto->cryptodev)) {
- char *path = object_get_canonical_path_component(OBJECT(vcrypto->conf.cryptodev));
- error_setg(errp, "can't use already used cryptodev backend: %s", path);
- g_free(path);
+ error_setg(errp, "can't use already used cryptodev backend: %s",
+ object_get_canonical_path_component(OBJECT(vcrypto->conf.cryptodev)));
return;
}
diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
index 65850530e7..c12e9f79b0 100644
--- a/hw/virtio/virtio-mem.c
+++ b/hw/virtio/virtio-mem.c
@@ -409,11 +409,9 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
error_setg(errp, "'%s' property is not set", VIRTIO_MEM_MEMDEV_PROP);
return;
} else if (host_memory_backend_is_mapped(vmem->memdev)) {
- char *path = object_get_canonical_path_component(OBJECT(vmem->memdev));
-
error_setg(errp, "'%s' property specifies a busy memdev: %s",
- VIRTIO_MEM_MEMDEV_PROP, path);
- g_free(path);
+ VIRTIO_MEM_MEMDEV_PROP,
+ object_get_canonical_path_component(OBJECT(vmem->memdev)));
return;
} else if (!memory_region_is_ram(&vmem->memdev->mr) ||
memory_region_is_rom(&vmem->memdev->mr) ||
diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c
index c3374b2f3f..1e0c137497 100644
--- a/hw/virtio/virtio-pmem.c
+++ b/hw/virtio/virtio-pmem.c
@@ -112,9 +112,8 @@ static void virtio_pmem_realize(DeviceState *dev, Error **errp)
}
if (host_memory_backend_is_mapped(pmem->memdev)) {
- char *path = object_get_canonical_path_component(OBJECT(pmem->memdev));
- error_setg(errp, "can't use already busy memdev: %s", path);
- g_free(path);
+ error_setg(errp, "can't use already busy memdev: %s",
+ object_get_canonical_path_component(OBJECT(pmem->memdev)));
return;
}
diff --git a/include/net/eth.h b/include/net/eth.h
index 7f45c678e7..0671be6916 100644
--- a/include/net/eth.h
+++ b/include/net/eth.h
@@ -186,6 +186,7 @@ struct tcp_hdr {
#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt
#define ip6_ecn_acc ip6_ctlun.ip6_un3.ip6_un3_ecn
+#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen
#define PKT_GET_ETH_HDR(p) \
((struct eth_header *)(p))
diff --git a/include/qom/object.h b/include/qom/object.h
index 79c8f838b6..0f3a60617c 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -1462,13 +1462,14 @@ Object *object_get_internal_root(void);
* path is the path within the composition tree starting from the root.
* %NULL if the object doesn't have a parent (and thus a canonical path).
*/
-char *object_get_canonical_path_component(const Object *obj);
+const char *object_get_canonical_path_component(const Object *obj);
/**
* object_get_canonical_path:
*
- * Returns: The canonical path for a object. This is the path within the
- * composition tree starting from the root.
+ * Returns: The canonical path for an object, newly allocated. This is
+ * the path within the composition tree starting from the root. Use
+ * g_free() to free it.
*/
char *object_get_canonical_path(const Object *obj);
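
Throughout this series the ownership rules for the two QOM path helpers
diverge: object_get_canonical_path_component() now returns a pointer
borrowed from the parent's property table, while object_get_canonical_path()
still returns a newly allocated string. A hedged caller-side sketch
(example_describe() is a hypothetical helper, not part of the patch):

    #include "qemu/osdep.h"
    #include "qom/object.h"

    static char *example_describe(Object *obj)
    {
        /* Borrowed pointer owned by the object's parent: do not g_free(),
         * and g_strdup() it if it must outlive the object. */
        const char *component = object_get_canonical_path_component(obj);

        /* Newly allocated string: the caller must g_free() it. */
        char *path = object_get_canonical_path(obj);

        char *desc = g_strdup_printf("%s at %s",
                                     component ? component : "(unparented)",
                                     path ? path : "(no path)");
        g_free(path);
        return desc;
    }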
diff --git a/iothread.c b/iothread.c
index 0598a6d20d..263ec6e5bc 100644
--- a/iothread.c
+++ b/iothread.c
@@ -165,7 +165,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
{
Error *local_error = NULL;
IOThread *iothread = IOTHREAD(obj);
- char *name, *thread_name;
+ char *thread_name;
iothread->stopping = false;
iothread->running = true;
@@ -195,12 +195,11 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
/* This assumes we are called from a thread with useful CPU affinity for us
* to inherit.
*/
- name = object_get_canonical_path_component(OBJECT(obj));
- thread_name = g_strdup_printf("IO %s", name);
+ thread_name = g_strdup_printf("IO %s",
+ object_get_canonical_path_component(OBJECT(obj)));
qemu_thread_create(&iothread->thread, thread_name, iothread_run,
iothread, QEMU_THREAD_JOINABLE);
g_free(thread_name);
- g_free(name);
/* Wait for initialization to complete */
while (iothread->thread_id == -1) {
@@ -303,7 +302,7 @@ type_init(iothread_register_types)
char *iothread_get_id(IOThread *iothread)
{
- return object_get_canonical_path_component(OBJECT(iothread));
+ return g_strdup(object_get_canonical_path_component(OBJECT(iothread)));
}
AioContext *iothread_get_aio_context(IOThread *iothread)
diff --git a/net/net.c b/net/net.c
index 7fddcebaa2..bbaedb3c7a 100644
--- a/net/net.c
+++ b/net/net.c
@@ -1185,12 +1185,10 @@ void print_net_client(Monitor *mon, NetClientState *nc)
monitor_printf(mon, "filters:\n");
}
QTAILQ_FOREACH(nf, &nc->filters, next) {
- char *path = object_get_canonical_path_component(OBJECT(nf));
-
- monitor_printf(mon, " - %s: type=%s", path,
+ monitor_printf(mon, " - %s: type=%s",
+ object_get_canonical_path_component(OBJECT(nf)),
object_get_typename(OBJECT(nf)));
netfilter_print_info(mon, nf);
- g_free(path);
}
}
diff --git a/qom/object.c b/qom/object.c
index 76f5f75239..00fdf89b3b 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -1931,7 +1931,7 @@ object_property_add_const_link(Object *obj, const char *name,
NULL, OBJ_PROP_LINK_DIRECT);
}
-char *object_get_canonical_path_component(const Object *obj)
+const char *object_get_canonical_path_component(const Object *obj)
{
ObjectProperty *prop = NULL;
GHashTableIter iter;
@@ -1947,7 +1947,7 @@ char *object_get_canonical_path_component(const Object *obj)
}
if (prop->opaque == obj) {
- return g_strdup(prop->name);
+ return prop->name;
}
}
@@ -1966,7 +1966,7 @@ char *object_get_canonical_path(const Object *obj)
}
do {
- char *component = object_get_canonical_path_component(obj);
+ const char *component = object_get_canonical_path_component(obj);
if (!component) {
/* A canonical path must be complete, so discard what was
@@ -1978,7 +1978,6 @@ char *object_get_canonical_path(const Object *obj)
newpath = g_strdup_printf("/%s%s", component, path ? path : "");
g_free(path);
- g_free(component);
path = newpath;
obj = obj->parent;
} while (obj != root);
diff --git a/qom/qom-hmp-cmds.c b/qom/qom-hmp-cmds.c
index aaacadacca..8861a109d5 100644
--- a/qom/qom-hmp-cmds.c
+++ b/qom/qom-hmp-cmds.c
@@ -94,42 +94,40 @@ typedef struct QOMCompositionState {
static void print_qom_composition(Monitor *mon, Object *obj, int indent);
-static int qom_composition_compare(const void *a, const void *b, void *ignore)
+static int qom_composition_compare(const void *a, const void *b)
{
- g_autofree char *ac = object_get_canonical_path_component(a);
- g_autofree char *bc = object_get_canonical_path_component(b);
-
- return g_strcmp0(ac, bc);
+ return g_strcmp0(object_get_canonical_path_component(*(Object **)a),
+ object_get_canonical_path_component(*(Object **)b));
}
static int insert_qom_composition_child(Object *obj, void *opaque)
{
- GQueue *children = opaque;
-
- g_queue_insert_sorted(children, obj, qom_composition_compare, NULL);
+ g_array_append_val(opaque, obj);
return 0;
}
static void print_qom_composition(Monitor *mon, Object *obj, int indent)
{
- char *name;
- GQueue children;
- Object *child;
+ GArray *children = g_array_new(false, false, sizeof(Object *));
+ const char *name;
+ int i;
if (obj == object_get_root()) {
- name = g_strdup("");
+ name = "";
} else {
name = object_get_canonical_path_component(obj);
}
monitor_printf(mon, "%*s/%s (%s)\n", indent, "", name,
object_get_typename(obj));
- g_free(name);
- g_queue_init(&children);
- object_child_foreach(obj, insert_qom_composition_child, &children);
- while ((child = g_queue_pop_head(&children))) {
- print_qom_composition(mon, child, indent + 2);
+ object_child_foreach(obj, insert_qom_composition_child, children);
+ g_array_sort(children, qom_composition_compare);
+
+ for (i = 0; i < children->len; i++) {
+ print_qom_composition(mon, g_array_index(children, Object *, i),
+ indent + 2);
}
+ g_array_free(children, TRUE);
}
void hmp_info_qom_tree(Monitor *mon, const QDict *dict)
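
The comparator above dereferences '*(Object **)a' because g_array_sort()
hands the comparison function pointers to the array elements rather than
the elements themselves. A standalone GLib sketch of the same pattern,
using strings instead of QOM objects (not QEMU code):

    #include <glib.h>
    #include <stdio.h>

    /* g_array_sort() passes 'const char **' here, one level of
     * indirection above the element type, so dereference first. */
    static int cmp_str(const void *a, const void *b)
    {
        return g_strcmp0(*(const char * const *)a, *(const char * const *)b);
    }

    int main(void)
    {
        GArray *names = g_array_new(FALSE, FALSE, sizeof(const char *));
        const char *items[] = { "rtc", "apic", "ioapic" };

        for (guint i = 0; i < G_N_ELEMENTS(items); i++) {
            g_array_append_val(names, items[i]);
        }
        g_array_sort(names, cmp_str);

        /* prints apic, ioapic, rtc */
        for (guint i = 0; i < names->len; i++) {
            printf("%s\n", g_array_index(names, const char *, i));
        }
        g_array_free(names, TRUE);
        return 0;
    }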
diff --git a/scripts/qapi/visit.py b/scripts/qapi/visit.py
index 3fb2f30510..cdabc5fa28 100644
--- a/scripts/qapi/visit.py
+++ b/scripts/qapi/visit.py
@@ -249,6 +249,7 @@ bool visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error
if (!*obj) {
/* incomplete */
assert(visit_is_dealloc(v));
+ ok = true;
goto out_obj;
}
if (!visit_type_%(c_name)s_members(v, *obj, errp)) {
diff --git a/scsi/pr-manager-helper.c b/scsi/pr-manager-helper.c
index bf62cbec11..5acccfb4e3 100644
--- a/scsi/pr-manager-helper.c
+++ b/scsi/pr-manager-helper.c
@@ -42,11 +42,10 @@ typedef struct PRManagerHelper {
static void pr_manager_send_status_changed_event(PRManagerHelper *pr_mgr)
{
- char *id = object_get_canonical_path_component(OBJECT(pr_mgr));
+ const char *id = object_get_canonical_path_component(OBJECT(pr_mgr));
if (id) {
qapi_event_send_pr_manager_status_changed(id, !!pr_mgr->ioc);
- g_free(id);
}
}
diff --git a/scsi/pr-manager.c b/scsi/pr-manager.c
index 0c866e8698..32b9287e68 100644
--- a/scsi/pr-manager.c
+++ b/scsi/pr-manager.c
@@ -128,7 +128,7 @@ static int query_one_pr_manager(Object *object, void *opaque)
elem = g_new0(PRManagerInfoList, 1);
info = g_new0(PRManagerInfo, 1);
- info->id = object_get_canonical_path_component(object);
+ info->id = g_strdup(object_get_canonical_path_component(object));
info->connected = pr_manager_is_connected(pr_mgr);
elem->value = info;
elem->next = NULL;
diff --git a/softmmu/memory.c b/softmmu/memory.c
index 9200b20130..af25987518 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -1764,7 +1764,7 @@ const char *memory_region_name(const MemoryRegion *mr)
{
if (!mr->name) {
((MemoryRegion *)mr)->name =
- object_get_canonical_path_component(OBJECT(mr));
+ g_strdup(object_get_canonical_path_component(OBJECT(mr)));
}
return mr->name;
}
diff --git a/tests/qemu-iotests/296 b/tests/qemu-iotests/296
index ec69ec8974..fb7dec88aa 100755
--- a/tests/qemu-iotests/296
+++ b/tests/qemu-iotests/296
@@ -133,6 +133,21 @@ class EncryptionSetupTestCase(iotests.QMPTestCase):
)
self.assert_qmp(result, 'return', {})
+
+ ###########################################################################
+ # add virtio-blk consumer for a block device
+ def addImageUser(self, vm, id, disk_id, share_rw=False):
+ result = vm.qmp('device_add', **
+ {
+ 'driver': 'virtio-blk',
+ 'id': id,
+ 'drive': disk_id,
+ 'share-rw' : share_rw
+ }
+ )
+
+ iotests.log(result)
+
# close the encrypted block device
def closeImageQmp(self, vm, id):
result = vm.qmp('blockdev-del', **{ 'node-name': id })
@@ -159,7 +174,7 @@ class EncryptionSetupTestCase(iotests.QMPTestCase):
vm.run_job('job0')
# test that when the image opened by two qemu processes,
- # neither of them can update the image
+ # neither of them can update the encryption keys
def test1(self):
self.createImg(test_img, self.secrets[0]);
@@ -193,6 +208,9 @@ class EncryptionSetupTestCase(iotests.QMPTestCase):
os.remove(test_img)
+ # test that when the image is opened by two qemu processes,
+ # even if the first VM opens it read-only, the second one can't update
+ # the encryption keys
def test2(self):
self.createImg(test_img, self.secrets[0]);
@@ -226,6 +244,30 @@ class EncryptionSetupTestCase(iotests.QMPTestCase):
self.closeImageQmp(self.vm1, "testdev")
os.remove(test_img)
+ # test that two VMs can't open the same luks image by default
+ # and attach it to a guest device
+ def test3(self):
+ self.createImg(test_img, self.secrets[0]);
+
+ self.openImageQmp(self.vm1, "testdev", test_img, self.secrets[0])
+ self.addImageUser(self.vm1, "testctrl", "testdev")
+
+ self.openImageQmp(self.vm2, "testdev", test_img, self.secrets[0])
+ self.addImageUser(self.vm2, "testctrl", "testdev")
+
+
+ # test that two VMs can attach the same luks image to a guest device,
+ # if both use share-rw=on
+ def test4(self):
+ self.createImg(test_img, self.secrets[0]);
+
+ self.openImageQmp(self.vm1, "testdev", test_img, self.secrets[0])
+ self.addImageUser(self.vm1, "testctrl", "testdev", share_rw=True)
+
+ self.openImageQmp(self.vm2, "testdev", test_img, self.secrets[0])
+ self.addImageUser(self.vm2, "testctrl", "testdev", share_rw=True)
+
+
if __name__ == '__main__':
# support only raw luks since luks encrypted qcow2 is a proper
diff --git a/tests/qemu-iotests/296.out b/tests/qemu-iotests/296.out
index afb6d2d09d..cb2859a15c 100644
--- a/tests/qemu-iotests/296.out
+++ b/tests/qemu-iotests/296.out
@@ -26,8 +26,16 @@ Job failed: Failed to get shared "consistent read" lock
{"return": {}}
{"execute": "job-dismiss", "arguments": {"id": "job0"}}
{"return": {}}
-..
+Formatting 'TEST_DIR/test.img', fmt=luks size=1048576 key-secret=keysec0 iter-time=10
+
+{"return": {}}
+{"error": {"class": "GenericError", "desc": "Failed to get \"write\" lock"}}
+Formatting 'TEST_DIR/test.img', fmt=luks size=1048576 key-secret=keysec0 iter-time=10
+
+{"return": {}}
+{"return": {}}
+....
----------------------------------------------------------------------
-Ran 2 tests
+Ran 4 tests
OK
diff --git a/util/module.c b/util/module.c
index 90e9bd42c6..0ab00851f0 100644
--- a/util/module.c
+++ b/util/module.c
@@ -275,6 +275,9 @@ void module_load_qom_one(const char *type)
{
int i;
+ if (!type) {
+ return;
+ }
if (module_loaded_qom_all) {
return;
}