Diffstat (limited to 'hw')
-rw-r--r--   hw/acpi/core.c          |    8
-rw-r--r--   hw/arm/xlnx-ep108.c     |    2
-rw-r--r--   hw/block/nand.c         |    4
-rw-r--r--   hw/block/virtio-blk.c   |    3
-rw-r--r--   hw/core/machine.c       |    5
-rw-r--r--   hw/ide/atapi.c          |  139
-rw-r--r--   hw/ide/core.c           |   51
-rw-r--r--   hw/ide/internal.h       |   14
-rw-r--r--   hw/ide/pci.c            |   19
-rw-r--r--   hw/intc/arm_gic.c       |    4
-rw-r--r--   hw/misc/ivshmem.c       |    9
-rw-r--r--   hw/net/vhost_net.c      |   14
-rw-r--r--   hw/pci-host/piix.c      |    5
-rw-r--r--   hw/pci-host/q35.c       |    2
-rw-r--r--   hw/ppc/spapr_drc.c      |    5
-rw-r--r--   hw/tpm/tpm_tis.c        |    2
-rw-r--r--   hw/virtio/vhost-user.c  |   25
17 files changed, 235 insertions, 76 deletions
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index fe6215af4a..21e113d713 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -625,8 +625,12 @@ void acpi_pm1_cnt_reset(ACPIREGS *ar)
 void acpi_gpe_init(ACPIREGS *ar, uint8_t len)
 {
     ar->gpe.len = len;
-    ar->gpe.sts = g_malloc0(len / 2);
-    ar->gpe.en = g_malloc0(len / 2);
+    /* Only first len / 2 bytes are ever used,
+     * but the caller in ich9.c migrates full len bytes.
+     * TODO: fix ich9.c and drop the extra allocation.
+     */
+    ar->gpe.sts = g_malloc0(len);
+    ar->gpe.en = g_malloc0(len);
 }
 
 void acpi_gpe_reset(ACPIREGS *ar)
diff --git a/hw/arm/xlnx-ep108.c b/hw/arm/xlnx-ep108.c
index 2899698443..85b978fa76 100644
--- a/hw/arm/xlnx-ep108.c
+++ b/hw/arm/xlnx-ep108.c
@@ -51,7 +51,7 @@ static void xlnx_ep108_init(MachineState *machine)
         machine->ram_size = EP108_MAX_RAM_SIZE;
     }
 
-    if (machine->ram_size <= 0x08000000) {
+    if (machine->ram_size < 0x08000000) {
         qemu_log("WARNING: RAM size " RAM_ADDR_FMT " is small for EP108",
                  machine->ram_size);
     }
diff --git a/hw/block/nand.c b/hw/block/nand.c
index 61d2cec032..a68266f887 100644
--- a/hw/block/nand.c
+++ b/hw/block/nand.c
@@ -522,8 +522,8 @@ void nand_setio(DeviceState *dev, uint32_t value)
 
     if (s->ale) {
         unsigned int shift = s->addrlen * 8;
-        unsigned int mask = ~(0xff << shift);
-        unsigned int v = value << shift;
+        uint64_t mask = ~(0xffull << shift);
+        uint64_t v = (uint64_t)value << shift;
 
         s->addr = (s->addr & mask) | v;
         s->addrlen ++;
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index e70fccf80c..756ae5ce63 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -72,6 +72,9 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
     VirtIOBlock *s = req->dev;
 
     if (action == BLOCK_ERROR_ACTION_STOP) {
+        /* Break the link as the next request is going to be parsed from the
+         * ring again. Otherwise we may end up doing a double completion! */
+        req->mr_next = NULL;
         req->next = s->rq;
         s->rq = req;
     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index f4db340468..acca00db22 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -462,11 +462,6 @@ bool machine_usb(MachineState *machine)
     return machine->usb;
 }
 
-bool machine_iommu(MachineState *machine)
-{
-    return machine->iommu;
-}
-
 bool machine_kernel_irqchip_allowed(MachineState *machine)
 {
     return machine->kernel_irqchip_allowed;
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index cf0b78e3a4..7b9f74c3b5 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -105,20 +105,27 @@ static void cd_data_to_raw(uint8_t *buf, int lba)
     memset(buf, 0, 288);
 }
 
-static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size)
+static int
+cd_read_sector_sync(IDEState *s)
 {
     int ret;
 
     block_acct_start(blk_get_stats(s->blk), &s->acct,
                      4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
-    switch(sector_size) {
+
+#ifdef DEBUG_IDE_ATAPI
+    printf("cd_read_sector_sync: lba=%d\n", s->lba);
+#endif
+
+    switch (s->cd_sector_size) {
     case 2048:
-        ret = blk_read(s->blk, (int64_t)lba << 2, buf, 4);
+        ret = blk_read(s->blk, (int64_t)s->lba << 2,
+                       s->io_buffer, 4);
         break;
     case 2352:
-        ret = blk_read(s->blk, (int64_t)lba << 2, buf + 16, 4);
+        ret = blk_read(s->blk, (int64_t)s->lba << 2,
+                       s->io_buffer + 16, 4);
         if (ret >= 0) {
-            cd_data_to_raw(buf, lba);
+            cd_data_to_raw(s->io_buffer, s->lba);
         }
         break;
     default:
@@ -130,11 +137,65 @@ static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size)
         block_acct_failed(blk_get_stats(s->blk), &s->acct);
     } else {
         block_acct_done(blk_get_stats(s->blk), &s->acct);
+        s->lba++;
+        s->io_buffer_index = 0;
     }
 
     return ret;
 }
 
+static void cd_read_sector_cb(void *opaque, int ret)
+{
+    IDEState *s = opaque;
+
+    block_acct_done(blk_get_stats(s->blk), &s->acct);
+
+#ifdef DEBUG_IDE_ATAPI
+    printf("cd_read_sector_cb: lba=%d ret=%d\n", s->lba, ret);
+#endif
+
+    if (ret < 0) {
+        ide_atapi_io_error(s, ret);
+        return;
+    }
+
+    if (s->cd_sector_size == 2352) {
+        cd_data_to_raw(s->io_buffer, s->lba);
+    }
+
+    s->lba++;
+    s->io_buffer_index = 0;
+    s->status &= ~BUSY_STAT;
+
+    ide_atapi_cmd_reply_end(s);
+}
+
+static int cd_read_sector(IDEState *s)
+{
+    if (s->cd_sector_size != 2048 && s->cd_sector_size != 2352) {
+        return -EINVAL;
+    }
+
+    s->iov.iov_base = (s->cd_sector_size == 2352) ?
+                      s->io_buffer + 16 : s->io_buffer;
+    s->iov.iov_len = 4 * BDRV_SECTOR_SIZE;
+    qemu_iovec_init_external(&s->qiov, &s->iov, 1);
+
+#ifdef DEBUG_IDE_ATAPI
+    printf("cd_read_sector: lba=%d\n", s->lba);
+#endif
+
+    block_acct_start(blk_get_stats(s->blk), &s->acct,
+                     4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+
+    ide_buffered_readv(s, (int64_t)s->lba << 2, &s->qiov, 4,
+                       cd_read_sector_cb, s);
+
+    s->status |= BUSY_STAT;
+    return 0;
+}
+
 void ide_atapi_cmd_ok(IDEState *s)
 {
     s->error = 0;
@@ -170,6 +231,17 @@ void ide_atapi_io_error(IDEState *s, int ret)
     }
 }
 
+static uint16_t atapi_byte_count_limit(IDEState *s)
+{
+    uint16_t bcl;
+
+    bcl = s->lcyl | (s->hcyl << 8);
+    if (bcl == 0xffff) {
+        return 0xfffe;
+    }
+    return bcl;
+}
+
 /* The whole ATAPI transfer logic is handled in this function */
 void ide_atapi_cmd_reply_end(IDEState *s)
 {
@@ -185,18 +257,27 @@ void ide_atapi_cmd_reply_end(IDEState *s)
         ide_atapi_cmd_ok(s);
         ide_set_irq(s->bus);
 #ifdef DEBUG_IDE_ATAPI
-        printf("status=0x%x\n", s->status);
+        printf("end of transfer, status=0x%x\n", s->status);
 #endif
     } else {
         /* see if a new sector must be read */
         if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) {
-            ret = cd_read_sector(s, s->lba, s->io_buffer, s->cd_sector_size);
-            if (ret < 0) {
-                ide_atapi_io_error(s, ret);
+            if (!s->elementary_transfer_size) {
+                ret = cd_read_sector(s);
+                if (ret < 0) {
+                    ide_atapi_io_error(s, ret);
+                }
                 return;
+            } else {
+                /* rebuffering within an elementary transfer is
+                 * only possible with a sync request because we
+                 * end up with a race condition otherwise */
+                ret = cd_read_sector_sync(s);
+                if (ret < 0) {
+                    ide_atapi_io_error(s, ret);
+                    return;
+                }
             }
-            s->lba++;
-            s->io_buffer_index = 0;
         }
         if (s->elementary_transfer_size > 0) {
             /* there are some data left to transmit in this elementary
@@ -212,12 +293,10 @@
         } else {
             /* a new transfer is needed */
             s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO;
-            byte_count_limit = s->lcyl | (s->hcyl << 8);
+            byte_count_limit = atapi_byte_count_limit(s);
 #ifdef DEBUG_IDE_ATAPI
             printf("byte_count_limit=%d\n", byte_count_limit);
 #endif
-            if (byte_count_limit == 0xffff)
-                byte_count_limit--;
             size = s->packet_transfer_size;
             if (size > byte_count_limit) {
                 /* byte count limit must be even if this case */
@@ -278,7 +357,6 @@ static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors,
     s->io_buffer_index = sector_size;
     s->cd_sector_size = sector_size;
 
-    s->status = READY_STAT | SEEK_STAT;
     ide_atapi_cmd_reply_end(s);
 }
 
@@ -354,16 +432,16 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
         s->bus->dma->iov.iov_len = n * 4 * 512;
         qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
 
-        s->bus->dma->aiocb = blk_aio_readv(s->blk, (int64_t)s->lba << 2,
-                                           &s->bus->dma->qiov, n * 4,
-                                           ide_atapi_cmd_read_dma_cb, s);
+        s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2,
+                                                &s->bus->dma->qiov, n * 4,
+                                                ide_atapi_cmd_read_dma_cb, s);
         return;
 
 eot:
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->blk), &s->acct);
     } else {
-        block_acct_done(blk_get_stats(s->blk), &s->acct);
+        block_acct_done(blk_get_stats(s->blk), &s->acct);
     }
     ide_set_inactive(s, false);
 }
@@ -1186,7 +1264,7 @@ enum {
     NONDATA = 0x04,
 };
 
-static const struct {
+static const struct AtapiCmd {
     void (*handler)(IDEState *s, uint8_t *buf);
     int flags;
 } atapi_cmd_table[0x100] = {
@@ -1213,9 +1291,9 @@ static const struct {
 
 void ide_atapi_cmd(IDEState *s)
 {
-    uint8_t *buf;
+    uint8_t *buf = s->io_buffer;
+    const struct AtapiCmd *cmd = &atapi_cmd_table[s->io_buffer[0]];
 
-    buf = s->io_buffer;
 #ifdef DEBUG_IDE_ATAPI
     {
         int i;
@@ -1226,14 +1304,14 @@ void ide_atapi_cmd(IDEState *s)
         printf("\n");
     }
 #endif
+
     /*
      * If there's a UNIT_ATTENTION condition pending, only command flagged with
      * ALLOW_UA are allowed to complete. with other commands getting a CHECK
      * condition response unless a higher priority status, defined by the drive
      * here, is pending.
      */
-    if (s->sense_key == UNIT_ATTENTION &&
-        !(atapi_cmd_table[s->io_buffer[0]].flags & ALLOW_UA)) {
+    if (s->sense_key == UNIT_ATTENTION && !(cmd->flags & ALLOW_UA)) {
         ide_atapi_cmd_check_status(s);
         return;
     }
@@ -1244,7 +1322,7 @@ void ide_atapi_cmd(IDEState *s)
      * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
      * states rely on this behavior.
      */
-    if (!(atapi_cmd_table[s->io_buffer[0]].flags & ALLOW_UA) &&
+    if (!(cmd->flags & ALLOW_UA) &&
         !s->tray_open && blk_is_inserted(s->blk) && s->cdrom_changed) {
 
         if (s->cdrom_changed == 1) {
@@ -1259,7 +1337,7 @@ void ide_atapi_cmd(IDEState *s)
     }
 
     /* Report a Not Ready condition if appropriate for the command */
-    if ((atapi_cmd_table[s->io_buffer[0]].flags & CHECK_READY) &&
+    if ((cmd->flags & CHECK_READY) &&
         (!media_present(s) || !blk_is_inserted(s->blk)))
     {
         ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT);
@@ -1270,10 +1348,9 @@ void ide_atapi_cmd(IDEState *s)
      * If this is a data-transferring PIO command and BCL is 0,
      * we abort at the /ATA/ level, not the ATAPI level.
      * See ATA8 ACS3 section 7.17.6.49 and 7.21.5 */
-    if (!(atapi_cmd_table[s->io_buffer[0]].flags & NONDATA)) {
+    if (cmd->handler && !(cmd->flags & NONDATA)) {
         /* TODO: Check IDENTIFY data word 125 for default BCL (currently 0) */
-        uint16_t byte_count_limit = s->lcyl | (s->hcyl << 8);
-        if (!(byte_count_limit || s->atapi_dma)) {
+        if (!(atapi_byte_count_limit(s) || s->atapi_dma)) {
             /* TODO: Move abort back into core.c and make static inline again */
             ide_abort_command(s);
             return;
@@ -1281,8 +1358,8 @@ void ide_atapi_cmd(IDEState *s)
     }
 
     /* Execute the command */
-    if (atapi_cmd_table[s->io_buffer[0]].handler) {
-        atapi_cmd_table[s->io_buffer[0]].handler(s, buf);
+    if (cmd->handler) {
+        cmd->handler(s, buf);
         return;
     }
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 2725dd3b81..da3baab1eb 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -561,6 +561,53 @@ static bool ide_sect_range_ok(IDEState *s,
     return true;
 }
 
+static void ide_buffered_readv_cb(void *opaque, int ret)
+{
+    IDEBufferedRequest *req = opaque;
+    if (!req->orphaned) {
+        if (!ret) {
+            qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
+                                req->original_qiov->size);
+        }
+        req->original_cb(req->original_opaque, ret);
+    }
+    QLIST_REMOVE(req, list);
+    qemu_vfree(req->iov.iov_base);
+    g_free(req);
+}
+
+#define MAX_BUFFERED_REQS 16
+
+BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
+                               QEMUIOVector *iov, int nb_sectors,
+                               BlockCompletionFunc *cb, void *opaque)
+{
+    BlockAIOCB *aioreq;
+    IDEBufferedRequest *req;
+    int c = 0;
+
+    QLIST_FOREACH(req, &s->buffered_requests, list) {
+        c++;
+    }
+    if (c > MAX_BUFFERED_REQS) {
+        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
+    }
+
+    req = g_new0(IDEBufferedRequest, 1);
+    req->original_qiov = iov;
+    req->original_cb = cb;
+    req->original_opaque = opaque;
+    req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
+    req->iov.iov_len = iov->size;
+    qemu_iovec_init_external(&req->qiov, &req->iov, 1);
+
+    aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
+                           ide_buffered_readv_cb, req);
+
+    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
+    return aioreq;
+}
+
 static void ide_sector_read(IDEState *s);
 
 static void ide_sector_read_cb(void *opaque, int ret)
@@ -632,8 +679,8 @@ static void ide_sector_read(IDEState *s)
 
     block_acct_start(blk_get_stats(s->blk), &s->acct,
                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
-    s->pio_aiocb = blk_aio_readv(s->blk, sector_num, &s->qiov, n,
-                                 ide_sector_read_cb, s);
+    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
+                                      ide_sector_read_cb, s);
 }
 
 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index e4629b023a..2d1e2d2d2f 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -343,6 +343,16 @@ enum ide_dma_cmd {
 #define ide_cmd_is_read(s) \
     ((s)->dma_cmd == IDE_DMA_READ)
 
+typedef struct IDEBufferedRequest {
+    QLIST_ENTRY(IDEBufferedRequest) list;
+    struct iovec iov;
+    QEMUIOVector qiov;
+    QEMUIOVector *original_qiov;
+    BlockCompletionFunc *original_cb;
+    void *original_opaque;
+    bool orphaned;
+} IDEBufferedRequest;
+
 /* NOTE: IDEState represents in fact one drive */
 struct IDEState {
     IDEBus *bus;
@@ -396,6 +406,7 @@ struct IDEState {
     BlockAIOCB *pio_aiocb;
     struct iovec iov;
     QEMUIOVector qiov;
+    QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
     /* ATA DMA state */
     uint64_t io_buffer_offset;
     int32_t io_buffer_size;
@@ -572,6 +583,9 @@ void ide_set_inactive(IDEState *s, bool more);
 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
+                               QEMUIOVector *iov, int nb_sectors,
+                               BlockCompletionFunc *cb, void *opaque);
 
 /* hw/ide/atapi.c */
 void ide_atapi_cmd(IDEState *s);
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 9c54b378d6..37dbc291da 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -233,6 +233,22 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
     /* Ignore writes to SSBM if it keeps the old value */
     if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
         if (!(val & BM_CMD_START)) {
+            /* First invoke the callbacks of all buffered requests
+             * and flag those requests as orphaned. Ideally there
+             * are no unbuffered (Scatter Gather DMA Requests or
+             * write requests) pending and we can avoid to drain. */
+            IDEBufferedRequest *req;
+            IDEState *s = idebus_active_if(bm->bus);
+            QLIST_FOREACH(req, &s->buffered_requests, list) {
+                if (!req->orphaned) {
+#ifdef DEBUG_IDE
+                    printf("%s: invoking cb %p of buffered request %p with"
+                           " -ECANCELED\n", __func__, req->original_cb, req);
+#endif
+                    req->original_cb(req->original_opaque, -ECANCELED);
+                }
+                req->orphaned = true;
+            }
             /*
              * We can't cancel Scatter Gather DMA in the middle of the
              * operation or a partial (not full) DMA transfer would reach
@@ -246,6 +262,9 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
              * aio operation with preadv/pwritev.
              */
             if (bm->bus->dma->aiocb) {
+#ifdef DEBUG_IDE
+                printf("%s: draining all remaining requests", __func__);
+#endif
                 blk_drain_all();
                 assert(bm->bus->dma->aiocb == NULL);
             }
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index d71aeb8a2a..13e297d52e 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -254,9 +254,9 @@ static void gic_activate_irq(GICState *s, int cpu, int irq)
     int bitno = preemption_level % 32;
 
     if (gic_has_groups(s) && GIC_TEST_GROUP(irq, (1 << cpu))) {
-        s->nsapr[regno][cpu] &= (1 << bitno);
+        s->nsapr[regno][cpu] |= (1 << bitno);
     } else {
-        s->apr[regno][cpu] &= (1 << bitno);
+        s->apr[regno][cpu] |= (1 << bitno);
     }
 
     s->running_priority[cpu] = prio;
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 83d7bd3e5f..f73f0c2b17 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -60,8 +60,6 @@
 #define IVSHMEM(obj) \
     OBJECT_CHECK(IVShmemState, (obj), TYPE_IVSHMEM)
 
-#define IVSHMEM_MEMDEV_PROP "memdev"
-
 typedef struct Peer {
     int nb_eventfds;
     EventNotifier *eventfds;
@@ -857,8 +855,8 @@ static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
             PCI_BASE_ADDRESS_MEM_PREFETCH;
 
     if (!!s->server_chr + !!s->shmobj + !!s->hostmem != 1) {
-        error_setg(errp, "You must specify either a shmobj, a chardev"
-                   " or a hostmem");
+        error_setg(errp,
+                   "You must specify either 'shm', 'chardev' or 'x-memdev'");
         return;
     }
 
@@ -939,6 +937,7 @@ static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
         memory_region_add_subregion(&s->bar, 0, mr);
         pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);
     } else if (s->server_chr != NULL) {
+        /* FIXME do not rely on what chr drivers put into filename */
         if (strncmp(s->server_chr->filename, "unix:", 5)) {
             error_setg(errp, "chardev is not a unix client socket");
             return;
@@ -1181,7 +1180,7 @@ static void ivshmem_init(Object *obj)
 {
     IVShmemState *s = IVSHMEM(obj);
 
-    object_property_add_link(obj, IVSHMEM_MEMDEV_PROP, TYPE_MEMORY_BACKEND,
+    object_property_add_link(obj, "x-memdev", TYPE_MEMORY_BACKEND,
                              (Object **)&s->hostmem,
                              ivshmem_check_memdev_is_busy,
                              OBJ_PROP_LINK_UNREF_ON_RELEASE,
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index d91b7b155e..318c3e6ad2 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -77,14 +77,8 @@ static const int user_feature_bits[] = {
     VIRTIO_NET_F_HOST_ECN,
     VIRTIO_NET_F_HOST_UFO,
     VIRTIO_NET_F_MRG_RXBUF,
-    VIRTIO_NET_F_STATUS,
-    VIRTIO_NET_F_CTRL_VQ,
-    VIRTIO_NET_F_CTRL_RX,
-    VIRTIO_NET_F_CTRL_VLAN,
-    VIRTIO_NET_F_CTRL_RX_EXTRA,
-    VIRTIO_NET_F_CTRL_MAC_ADDR,
-    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+
     /* This bit implies RARP isn't sent by QEMU out of band */
     VIRTIO_NET_F_GUEST_ANNOUNCE,
 
     VIRTIO_NET_F_MQ,
@@ -292,12 +286,6 @@ static void vhost_net_stop_one(struct vhost_net *net,
             int r = vhost_ops->vhost_net_set_backend(&net->dev, &file);
             assert(r >= 0);
         }
-    } else if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
-        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
-            const VhostOps *vhost_ops = net->dev.vhost_ops;
-            int r = vhost_ops->vhost_reset_device(&net->dev);
-            assert(r >= 0);
-        }
     }
     if (net->nc->info->poll) {
         net->nc->info->poll(net->nc, true);
diff --git a/hw/pci-host/piix.c b/hw/pci-host/piix.c
index 7b2fbf9598..715208b22a 100644
--- a/hw/pci-host/piix.c
+++ b/hw/pci-host/piix.c
@@ -34,6 +34,7 @@
 #include "sysemu/sysemu.h"
 #include "hw/i386/ioapic.h"
 #include "qapi/visitor.h"
+#include "qemu/error-report.h"
 
 /*
  * I440FX chipset data sheet.
@@ -301,6 +302,10 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
 static void i440fx_realize(PCIDevice *dev, Error **errp)
 {
     dev->config[I440FX_SMRAM] = 0x02;
+
+    if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) {
+        error_report("warning: i440fx doesn't support emulated iommu");
+    }
 }
 
 PCIBus *i440fx_init(const char *host_type, const char *pci_type,
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index c81507d710..1fb470758b 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -506,7 +506,7 @@ static void mch_realize(PCIDevice *d, Error **errp)
                  PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
     }
     /* Intel IOMMU (VT-d) */
-    if (machine_iommu(current_machine)) {
+    if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) {
         mch_init_dmar(mch);
     }
 }
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index 5d6ea7ce41..f34bc04c48 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -657,6 +657,7 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
 {
     Object *root_container;
     ObjectProperty *prop;
+    ObjectPropertyIterator *iter;
     uint32_t drc_count = 0;
     GArray *drc_indexes, *drc_power_domains;
     GString *drc_names, *drc_types;
@@ -680,7 +681,8 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
      */
     root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
 
-    QTAILQ_FOREACH(prop, &root_container->properties, node) {
+    iter = object_property_iter_init(root_container);
+    while ((prop = object_property_iter_next(iter))) {
         Object *obj;
         sPAPRDRConnector *drc;
         sPAPRDRConnectorClass *drck;
@@ -721,6 +723,7 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
                                       spapr_drc_get_type_str(drc->type));
         drc_types = g_string_insert_len(drc_types, -1, "\0", 1);
     }
+    object_property_iter_free(iter);
 
     /* now write the drc count into the space we reserved at the
      * beginning of the arrays previously
diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
index 0806b5f82e..ff073d501a 100644
--- a/hw/tpm/tpm_tis.c
+++ b/hw/tpm/tpm_tis.c
@@ -141,7 +141,7 @@
 
 #define TPM_TIS_IFACE_ID_SUPPORTED_FLAGS1_3 \
     (TPM_TIS_IFACE_ID_INTERFACE_TIS1_3 | \
-     (~0 << 4)/* all of it is don't care */)
+     (~0u << 4)/* all of it is don't care */)
 
 /* if backend was a TPM 2.0: */
 #define TPM_TIS_IFACE_ID_SUPPORTED_FLAGS2_0 \
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index c44360219f..1b6c5ac238 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -121,8 +121,8 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
 
     r = qemu_chr_fe_read_all(chr, p, size);
     if (r != size) {
-        error_report("Failed to read msg header. Read %d instead of %d.", r,
-                     size);
+        error_report("Failed to read msg header. Read %d instead of %d."
+                     " Original request %d.", r, size, msg->request);
         goto fail;
     }
 
@@ -206,7 +206,7 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
     VhostUserMsg msg = {
         .request = VHOST_USER_SET_LOG_BASE,
         .flags = VHOST_USER_VERSION,
-        .payload.log.mmap_size = log->size,
+        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
         .payload.log.mmap_offset = 0,
         .size = sizeof(msg.payload.log),
     };
@@ -333,18 +333,23 @@ static int vhost_user_set_vring_base(struct vhost_dev *dev,
 
 static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
 {
-    struct vhost_vring_state state = {
-        .index = dev->vq_index,
-        .num = enable,
-    };
+    int i;
 
-    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ))) {
+    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
         return -1;
     }
 
-    return vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
-}
+    for (i = 0; i < dev->nvqs; ++i) {
+        struct vhost_vring_state state = {
+            .index = dev->vq_index + i,
+            .num = enable,
+        };
+
+        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+    }
+
+    return 0;
+}
 
 static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)