-rw-r--r--  block/iscsi.c                    | 189
-rwxr-xr-x  configure                        |  39
-rw-r--r--  hw/display/qxl.c                 |   2
-rw-r--r--  hw/scsi/megasas.c                |  16
-rw-r--r--  hw/scsi/scsi-bus.c               |   4
-rw-r--r--  hw/scsi/scsi-disk.c              |   7
-rw-r--r--  hw/scsi/spapr_vscsi.c            |   5
-rw-r--r--  hw/scsi/vhost-scsi.c             |   8
-rw-r--r--  hw/scsi/virtio-scsi.c            | 328
-rw-r--r--  include/block/scsi.h             |   2
-rw-r--r--  include/hw/i386/pc.h             |   4
-rw-r--r--  include/hw/virtio/virtio-scsi.h  |  11
-rw-r--r--  include/qemu-common.h            |   6
-rw-r--r--  include/qemu/aes.h               |   9
-rw-r--r--  tcg/optimize.c                   |   9
-rw-r--r--  ui/spice-display.c               |   2
-rw-r--r--  util/iov.c                       |  10
-rw-r--r--  util/oslib-win32.c               | 252
18 files changed, 524 insertions(+), 379 deletions(-)
diff --git a/block/iscsi.c b/block/iscsi.c
index 6f87605e72..84aa22a62e 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -26,6 +26,7 @@
#include "config-host.h"
#include <poll.h>
+#include <math.h>
#include <arpa/inet.h>
#include "qemu-common.h"
#include "qemu/config-file.h"
@@ -64,6 +65,7 @@ typedef struct IscsiLun {
unsigned char *zeroblock;
unsigned long *allocationmap;
int cluster_sectors;
+ bool use_16_for_rw;
} IscsiLun;
typedef struct IscsiTask {
@@ -75,6 +77,7 @@ typedef struct IscsiTask {
Coroutine *co;
QEMUBH *bh;
IscsiLun *iscsilun;
+ QEMUTimer retry_timer;
} IscsiTask;
typedef struct IscsiAIOCB {
@@ -86,7 +89,6 @@ typedef struct IscsiAIOCB {
uint8_t *buf;
int status;
int canceled;
- int retries;
int64_t sector_num;
int nb_sectors;
#ifdef __linux__
@@ -96,7 +98,8 @@ typedef struct IscsiAIOCB {
#define NOP_INTERVAL 5000
#define MAX_NOP_FAILURES 3
-#define ISCSI_CMD_RETRIES 5
+#define ISCSI_CMD_RETRIES ARRAY_SIZE(iscsi_retry_times)
+static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048};
/* this threshhold is a trade-off knob to choose between
* the potential additional overhead of an extra GET_LBA_STATUS request
@@ -142,10 +145,25 @@ iscsi_schedule_bh(IscsiAIOCB *acb)
static void iscsi_co_generic_bh_cb(void *opaque)
{
struct IscsiTask *iTask = opaque;
+ iTask->complete = 1;
qemu_bh_delete(iTask->bh);
qemu_coroutine_enter(iTask->co, NULL);
}
+static void iscsi_retry_timer_expired(void *opaque)
+{
+ struct IscsiTask *iTask = opaque;
+ iTask->complete = 1;
+ if (iTask->co) {
+ qemu_coroutine_enter(iTask->co, NULL);
+ }
+}
+
+static inline unsigned exp_random(double mean)
+{
+ return -mean * log((double)rand() / RAND_MAX);
+}
+
static void
iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
void *command_data, void *opaque)
@@ -153,19 +171,34 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
struct IscsiTask *iTask = opaque;
struct scsi_task *task = command_data;
- iTask->complete = 1;
iTask->status = status;
iTask->do_retry = 0;
iTask->task = task;
- if (iTask->retries-- > 0 && status == SCSI_STATUS_CHECK_CONDITION
- && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
- error_report("iSCSI CheckCondition: %s", iscsi_get_error(iscsi));
- iTask->do_retry = 1;
- goto out;
- }
-
if (status != SCSI_STATUS_GOOD) {
+ if (iTask->retries++ < ISCSI_CMD_RETRIES) {
+ if (status == SCSI_STATUS_CHECK_CONDITION
+ && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
+ error_report("iSCSI CheckCondition: %s",
+ iscsi_get_error(iscsi));
+ iTask->do_retry = 1;
+ goto out;
+ }
+ if (status == SCSI_STATUS_BUSY) {
+ unsigned retry_time =
+ exp_random(iscsi_retry_times[iTask->retries - 1]);
+ error_report("iSCSI Busy (retry #%u in %u ms): %s",
+ iTask->retries, retry_time,
+ iscsi_get_error(iscsi));
+ aio_timer_init(iTask->iscsilun->aio_context,
+ &iTask->retry_timer, QEMU_CLOCK_REALTIME,
+ SCALE_MS, iscsi_retry_timer_expired, iTask);
+ timer_mod(&iTask->retry_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
+ iTask->do_retry = 1;
+ return;
+ }
+ }
error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
}
@@ -174,15 +207,16 @@ out:
iTask->bh = aio_bh_new(iTask->iscsilun->aio_context,
iscsi_co_generic_bh_cb, iTask);
qemu_bh_schedule(iTask->bh);
+ } else {
+ iTask->complete = 1;
}
}
static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask)
{
*iTask = (struct IscsiTask) {
- .co = qemu_coroutine_self(),
- .retries = ISCSI_CMD_RETRIES,
- .iscsilun = iscsilun,
+ .co = qemu_coroutine_self(),
+ .iscsilun = iscsilun,
};
}
@@ -325,8 +359,6 @@ static int coroutine_fn iscsi_co_writev(BlockDriverState *bs,
struct IscsiTask iTask;
uint64_t lba;
uint32_t num_sectors;
- uint8_t *data = NULL;
- uint8_t *buf = NULL;
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
@@ -334,31 +366,24 @@ static int coroutine_fn iscsi_co_writev(BlockDriverState *bs,
lba = sector_qemu2lun(sector_num, iscsilun);
num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
-#if !defined(LIBISCSI_FEATURE_IOVECTOR)
- /* if the iovec only contains one buffer we can pass it directly */
- if (iov->niov == 1) {
- data = iov->iov[0].iov_base;
- } else {
- size_t size = MIN(nb_sectors * BDRV_SECTOR_SIZE, iov->size);
- buf = g_malloc(size);
- qemu_iovec_to_buf(iov, 0, buf, size);
- data = buf;
- }
-#endif
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
- iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
- data, num_sectors * iscsilun->block_size,
- iscsilun->block_size, 0, 0, 0, 0, 0,
- iscsi_co_generic_cb, &iTask);
+ if (iscsilun->use_16_for_rw) {
+ iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
+ NULL, num_sectors * iscsilun->block_size,
+ iscsilun->block_size, 0, 0, 0, 0, 0,
+ iscsi_co_generic_cb, &iTask);
+ } else {
+ iTask.task = iscsi_write10_task(iscsilun->iscsi, iscsilun->lun, lba,
+ NULL, num_sectors * iscsilun->block_size,
+ iscsilun->block_size, 0, 0, 0, 0, 0,
+ iscsi_co_generic_cb, &iTask);
+ }
if (iTask.task == NULL) {
- g_free(buf);
return -ENOMEM;
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_out(iTask.task, (struct scsi_iovec *) iov->iov,
iov->niov);
-#endif
while (!iTask.complete) {
iscsi_set_events(iscsilun);
qemu_coroutine_yield();
@@ -374,8 +399,6 @@ retry:
goto retry;
}
- g_free(buf);
-
if (iTask.status != SCSI_STATUS_GOOD) {
return -EIO;
}
@@ -386,7 +409,6 @@ retry:
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
static bool iscsi_allocationmap_is_allocated(IscsiLun *iscsilun,
int64_t sector_num, int nb_sectors)
{
@@ -496,9 +518,6 @@ out:
return ret;
}
-#endif /* LIBISCSI_FEATURE_IOVECTOR */
-
-
static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
QEMUIOVector *iov)
@@ -507,15 +526,11 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
struct IscsiTask iTask;
uint64_t lba;
uint32_t num_sectors;
-#if !defined(LIBISCSI_FEATURE_IOVECTOR)
- int i;
-#endif
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
if (iscsilun->lbprz && nb_sectors >= ISCSI_CHECKALLOC_THRES &&
!iscsi_allocationmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
int64_t ret;
@@ -529,42 +544,28 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
return 0;
}
}
-#endif
lba = sector_qemu2lun(sector_num, iscsilun);
num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
- switch (iscsilun->type) {
- case TYPE_DISK:
+ if (iscsilun->use_16_for_rw) {
iTask.task = iscsi_read16_task(iscsilun->iscsi, iscsilun->lun, lba,
num_sectors * iscsilun->block_size,
iscsilun->block_size, 0, 0, 0, 0, 0,
iscsi_co_generic_cb, &iTask);
- break;
- default:
+ } else {
iTask.task = iscsi_read10_task(iscsilun->iscsi, iscsilun->lun, lba,
num_sectors * iscsilun->block_size,
iscsilun->block_size,
-#if !defined(CONFIG_LIBISCSI_1_4) /* API change from 1.4.0 to 1.5.0 */
0, 0, 0, 0, 0,
-#endif
iscsi_co_generic_cb, &iTask);
- break;
}
if (iTask.task == NULL) {
return -ENOMEM;
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_in(iTask.task, (struct scsi_iovec *) iov->iov, iov->niov);
-#else
- for (i = 0; i < iov->niov; i++) {
- scsi_task_add_data_in_buffer(iTask.task,
- iov->iov[i].iov_len,
- iov->iov[i].iov_base);
- }
-#endif
while (!iTask.complete) {
iscsi_set_events(iscsilun);
@@ -719,18 +720,9 @@ static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
data.data = acb->ioh->dxferp;
data.size = acb->ioh->dxfer_len;
} else {
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_out(acb->task,
(struct scsi_iovec *) acb->ioh->dxferp,
acb->ioh->iovec_count);
-#else
- struct iovec *iov = (struct iovec *)acb->ioh->dxferp;
-
- acb->buf = g_malloc(acb->ioh->dxfer_len);
- data.data = acb->buf;
- data.size = iov_to_buf(iov, acb->ioh->iovec_count, 0,
- acb->buf, acb->ioh->dxfer_len);
-#endif
}
}
@@ -750,20 +742,9 @@ static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
acb->ioh->dxfer_len,
acb->ioh->dxferp);
} else {
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_in(acb->task,
(struct scsi_iovec *) acb->ioh->dxferp,
acb->ioh->iovec_count);
-#else
- int i;
- for (i = 0; i < acb->ioh->iovec_count; i++) {
- struct iovec *iov = (struct iovec *)acb->ioh->dxferp;
-
- scsi_task_add_data_in_buffer(acb->task,
- iov[i].iov_len,
- iov[i].iov_base);
- }
-#endif
}
}
@@ -772,7 +753,6 @@ static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
return &acb->common;
}
-
static void ioctl_cb(void *opaque, int status)
{
int *p_status = opaque;
@@ -877,8 +857,6 @@ retry:
return 0;
}
-#if defined(SCSI_SENSE_ASCQ_CAPACITY_DATA_HAS_CHANGED)
-
static int
coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, BdrvRequestFlags flags)
@@ -887,19 +865,27 @@ coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
struct IscsiTask iTask;
uint64_t lba;
uint32_t nb_blocks;
+ bool use_16_for_ws = iscsilun->use_16_for_rw;
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
}
- if ((flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->lbp.lbpws) {
- /* WRITE SAME with UNMAP is not supported by the target,
- * fall back and try WRITE SAME without UNMAP */
- flags &= ~BDRV_REQ_MAY_UNMAP;
+ if (flags & BDRV_REQ_MAY_UNMAP) {
+ if (!use_16_for_ws && !iscsilun->lbp.lbpws10) {
+ /* WRITESAME10 with UNMAP is unsupported try WRITESAME16 */
+ use_16_for_ws = true;
+ }
+ if (use_16_for_ws && !iscsilun->lbp.lbpws) {
+ /* WRITESAME16 with UNMAP is not supported by the target,
+ * fall back and try WRITESAME10/16 without UNMAP */
+ flags &= ~BDRV_REQ_MAY_UNMAP;
+ use_16_for_ws = iscsilun->use_16_for_rw;
+ }
}
if (!(flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->has_write_same) {
- /* WRITE SAME without UNMAP is not supported by the target */
+ /* WRITESAME without UNMAP is not supported by the target */
return -ENOTSUP;
}
@@ -912,10 +898,18 @@ coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
- if (iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
- iscsilun->zeroblock, iscsilun->block_size,
- nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
- 0, 0, iscsi_co_generic_cb, &iTask) == NULL) {
+ if (use_16_for_ws) {
+ iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
+ iscsilun->zeroblock, iscsilun->block_size,
+ nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
+ 0, 0, iscsi_co_generic_cb, &iTask);
+ } else {
+ iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
+ iscsilun->zeroblock, iscsilun->block_size,
+ nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
+ 0, 0, iscsi_co_generic_cb, &iTask);
+ }
+ if (iTask.task == NULL) {
return -ENOMEM;
}
@@ -957,8 +951,6 @@ retry:
return 0;
}
-#endif /* SCSI_SENSE_ASCQ_CAPACITY_DATA_HAS_CHANGED */
-
static void parse_chap(struct iscsi_context *iscsi, const char *target,
Error **errp)
{
@@ -1068,7 +1060,6 @@ static char *parse_initiator_name(const char *target)
return iscsi_name;
}
-#if defined(LIBISCSI_FEATURE_NOP_COUNTER)
static void iscsi_nop_timed_event(void *opaque)
{
IscsiLun *iscsilun = opaque;
@@ -1086,7 +1077,6 @@ static void iscsi_nop_timed_event(void *opaque)
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
iscsi_set_events(iscsilun);
}
-#endif
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
{
@@ -1113,6 +1103,7 @@ static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
iscsilun->num_blocks = rc16->returned_lba + 1;
iscsilun->lbpme = rc16->lbpme;
iscsilun->lbprz = rc16->lbprz;
+ iscsilun->use_16_for_rw = (rc16->returned_lba > 0xffffffff);
}
}
break;
@@ -1224,14 +1215,12 @@ static void iscsi_attach_aio_context(BlockDriverState *bs,
iscsilun->aio_context = new_context;
iscsi_set_events(iscsilun);
-#if defined(LIBISCSI_FEATURE_NOP_COUNTER)
/* Set up a timer for sending out iSCSI NOPs */
iscsilun->nop_timer = aio_timer_new(iscsilun->aio_context,
QEMU_CLOCK_REALTIME, SCALE_MS,
iscsi_nop_timed_event, iscsilun);
timer_mod(iscsilun->nop_timer,
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
-#endif
}
/*
@@ -1423,13 +1412,11 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) {
iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran *
iscsilun->block_size) >> BDRV_SECTOR_BITS;
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
if (iscsilun->lbprz && !(bs->open_flags & BDRV_O_NOCACHE)) {
iscsilun->allocationmap =
bitmap_new(DIV_ROUND_UP(bs->total_sectors,
iscsilun->cluster_sectors));
}
-#endif
}
out:
@@ -1614,13 +1601,9 @@ static BlockDriver bdrv_iscsi = {
.bdrv_truncate = iscsi_truncate,
.bdrv_refresh_limits = iscsi_refresh_limits,
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
.bdrv_co_get_block_status = iscsi_co_get_block_status,
-#endif
.bdrv_co_discard = iscsi_co_discard,
-#if defined(SCSI_SENSE_ASCQ_CAPACITY_DATA_HAS_CHANGED)
.bdrv_co_write_zeroes = iscsi_co_write_zeroes,
-#endif
.bdrv_co_readv = iscsi_co_readv,
.bdrv_co_writev = iscsi_co_writev,
.bdrv_co_flush_to_disk = iscsi_co_flush,
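Editor's note: the block/iscsi.c hunks above add exponential back-off for SCSI BUSY replies: a fixed table of mean delays (one per retry) and an exp_random() helper that randomizes around that mean before re-arming the request from a timer. Below is a minimal standalone sketch of that delay calculation; it is not part of the patch, and the +1.0 guard against log(0) plus the demo main() are additions to the sketch.

/* Standalone sketch: randomized exponential back-off as used by the
 * new BUSY retry path.  iscsi_retry_times[] holds the mean delay in
 * milliseconds for retry #1..#5. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048};

static unsigned exp_random(double mean)
{
    double u = (rand() + 1.0) / ((double)RAND_MAX + 1.0); /* in (0, 1] */
    return -mean * log(u);
}

int main(void)
{
    for (unsigned retry = 1; retry <= 5; retry++) {
        printf("BUSY retry #%u: wait ~%u ms\n",
               retry, exp_random(iscsi_retry_times[retry - 1]));
    }
    return 0;
}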
diff --git a/configure b/configure
index 27d84d9fdb..a69e90b570 100755
--- a/configure
+++ b/configure
@@ -3405,46 +3405,20 @@ if compile_prog "" "" ; then
fi
##########################################
-# Do we have libiscsi
-# We check for iscsi_write16_sync() to make sure we have a
-# at least version 1.4.0 of libiscsi.
+# Do we have libiscsi >= 1.9.0
if test "$libiscsi" != "no" ; then
- cat > $TMPC << EOF
-#include <stdio.h>
-#include <iscsi/iscsi.h>
-int main(void) { iscsi_write16_sync(NULL,0,0,NULL,0,0,0,0,0,0,0); return 0; }
-EOF
- if $pkg_config --atleast-version=1.7.0 libiscsi; then
+ if $pkg_config --atleast-version=1.9.0 libiscsi; then
libiscsi="yes"
libiscsi_cflags=$($pkg_config --cflags libiscsi)
libiscsi_libs=$($pkg_config --libs libiscsi)
- elif compile_prog "" "-liscsi" ; then
- libiscsi="yes"
- libiscsi_libs="-liscsi"
else
if test "$libiscsi" = "yes" ; then
- feature_not_found "libiscsi" "Install libiscsi devel"
+ feature_not_found "libiscsi" "Install libiscsi >= 1.9.0"
fi
libiscsi="no"
fi
fi
-# We also need to know the API version because there was an
-# API change from 1.4.0 to 1.5.0.
-if test "$libiscsi" = "yes"; then
- cat >$TMPC <<EOF
-#include <iscsi/iscsi.h>
-int main(void)
-{
- iscsi_read10_task(0, 0, 0, 0, 0, 0, 0);
- return 0;
-}
-EOF
- if compile_prog "" "-liscsi"; then
- libiscsi_version="1.4.0"
- fi
-fi
-
##########################################
# Do we need libm
cat > $TMPC << EOF
@@ -4218,11 +4192,7 @@ echo "nss used $smartcard_nss"
echo "libusb $libusb"
echo "usb net redir $usb_redir"
echo "GLX support $glx"
-if test "$libiscsi_version" = "1.4.0"; then
-echo "libiscsi support $libiscsi (1.4.0)"
-else
echo "libiscsi support $libiscsi"
-fi
echo "libnfs support $libnfs"
echo "build guest agent $guest_agent"
echo "QGA VSS support $guest_agent_with_vss"
@@ -4579,9 +4549,6 @@ fi
if test "$libiscsi" = "yes" ; then
echo "CONFIG_LIBISCSI=m" >> $config_host_mak
- if test "$libiscsi_version" = "1.4.0"; then
- echo "CONFIG_LIBISCSI_1_4=y" >> $config_host_mak
- fi
echo "LIBISCSI_CFLAGS=$libiscsi_cflags" >> $config_host_mak
echo "LIBISCSI_LIBS=$libiscsi_libs" >> $config_host_mak
fi
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 736fd3c4e2..d43aa49eb8 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -710,7 +710,7 @@ static void interface_release_resource(QXLInstance *sin,
if (ext.group_id == MEMSLOT_GROUP_HOST) {
/* host group -> vga mode update request */
- QXLCommandExt *cmdext = (void *)(ext.info->id);
+ QXLCommandExt *cmdext = (void *)(intptr_t)(ext.info->id);
SimpleSpiceUpdate *update;
g_assert(cmdext->cmd.type == QXL_CMD_DRAW);
update = container_of(cmdext, SimpleSpiceUpdate, ext);
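Editor's note: the one-line qxl.c change above (mirrored in ui/spice-display.c further down) silences a 32-bit build warning: ext.info->id is a 64-bit integer that actually carries a host pointer, and casting it to a pointer through an intermediate (intptr_t) makes the size conversion explicit. A minimal sketch of the pattern follows, with a hypothetical id_to_pointer() helper that is not part of the patch.

/* Standalone sketch: cast a 64-bit id field that smuggles a pointer
 * back to a pointer without "cast to pointer from integer of
 * different size" warnings on 32-bit hosts. */
#include <stdint.h>
#include <stdio.h>

static void *id_to_pointer(uint64_t id)
{
    return (void *)(intptr_t)id;       /* same pattern as the patched casts */
}

int main(void)
{
    int x = 42;
    uint64_t id = (uintptr_t)&x;       /* pointer stored in a 64-bit id field */
    printf("%d\n", *(int *)id_to_pointer(id));
    return 0;
}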
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index b05c47abdc..c68a873f18 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -294,6 +294,7 @@ static void megasas_unmap_sgl(MegasasCmd *cmd)
static int megasas_build_sense(MegasasCmd *cmd, uint8_t *sense_ptr,
uint8_t sense_len)
{
+ PCIDevice *pcid = PCI_DEVICE(cmd->state);
uint32_t pa_hi = 0, pa_lo;
hwaddr pa;
@@ -306,7 +307,7 @@ static int megasas_build_sense(MegasasCmd *cmd, uint8_t *sense_ptr,
pa_hi = le32_to_cpu(cmd->frame->pass.sense_addr_hi);
}
pa = ((uint64_t) pa_hi << 32) | pa_lo;
- cpu_physical_memory_write(pa, sense_ptr, sense_len);
+ pci_dma_write(pcid, pa, sense_ptr, sense_len);
cmd->frame->header.sense_len = sense_len;
}
return sense_len;
@@ -472,6 +473,7 @@ static MegasasCmd *megasas_next_frame(MegasasState *s,
static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
hwaddr frame, uint64_t context, int count)
{
+ PCIDevice *pcid = PCI_DEVICE(s);
MegasasCmd *cmd = NULL;
int frame_size = MFI_FRAME_SIZE * 16;
hwaddr frame_size_p = frame_size;
@@ -484,11 +486,11 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
if (!cmd->pa) {
cmd->pa = frame;
/* Map all possible frames */
- cmd->frame = cpu_physical_memory_map(frame, &frame_size_p, 0);
+ cmd->frame = pci_dma_map(pcid, frame, &frame_size_p, 0);
if (frame_size_p != frame_size) {
trace_megasas_qf_map_failed(cmd->index, (unsigned long)frame);
if (cmd->frame) {
- cpu_physical_memory_unmap(cmd->frame, frame_size_p, 0, 0);
+ pci_dma_unmap(pcid, cmd->frame, frame_size_p, 0, 0);
cmd->frame = NULL;
cmd->pa = 0;
}
@@ -561,13 +563,14 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
static void megasas_reset_frames(MegasasState *s)
{
+ PCIDevice *pcid = PCI_DEVICE(s);
int i;
MegasasCmd *cmd;
for (i = 0; i < s->fw_cmds; i++) {
cmd = &s->frames[i];
if (cmd->pa) {
- cpu_physical_memory_unmap(cmd->frame, cmd->pa_size, 0, 0);
+ pci_dma_unmap(pcid, cmd->frame, cmd->pa_size, 0, 0);
cmd->frame = NULL;
cmd->pa = 0;
}
@@ -584,6 +587,7 @@ static void megasas_abort_command(MegasasCmd *cmd)
static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
{
+ PCIDevice *pcid = PCI_DEVICE(s);
uint32_t pa_hi, pa_lo;
hwaddr iq_pa, initq_size;
struct mfi_init_qinfo *initq;
@@ -595,7 +599,7 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
iq_pa = (((uint64_t) pa_hi << 32) | pa_lo);
trace_megasas_init_firmware((uint64_t)iq_pa);
initq_size = sizeof(*initq);
- initq = cpu_physical_memory_map(iq_pa, &initq_size, 0);
+ initq = pci_dma_map(pcid, iq_pa, &initq_size, 0);
if (!initq || initq_size != sizeof(*initq)) {
trace_megasas_initq_map_failed(cmd->index);
s->event_count++;
@@ -631,7 +635,7 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
s->fw_state = MFI_FWSTATE_OPERATIONAL;
out:
if (initq) {
- cpu_physical_memory_unmap(initq, initq_size, 0, 0);
+ pci_dma_unmap(pcid, initq, initq_size, 0, 0);
}
return ret;
}
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 003d2843df..ea1ac09c8a 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -1429,7 +1429,7 @@ int scsi_build_sense(uint8_t *in_buf, int in_len,
}
}
-static const char *scsi_command_name(uint8_t cmd)
+const char *scsi_command_name(uint8_t cmd)
{
static const char *names[] = {
[ TEST_UNIT_READY ] = "TEST_UNIT_READY",
@@ -1545,6 +1545,8 @@ static const char *scsi_command_name(uint8_t cmd)
[ SET_READ_AHEAD ] = "SET_READ_AHEAD",
[ ALLOW_OVERWRITE ] = "ALLOW_OVERWRITE",
[ MECHANISM_STATUS ] = "MECHANISM_STATUS",
+ [ GET_EVENT_STATUS_NOTIFICATION ] = "GET_EVENT_STATUS_NOTIFICATION",
+ [ READ_DISC_INFORMATION ] = "READ_DISC_INFORMATION",
};
if (cmd >= ARRAY_SIZE(names) || names[cmd] == NULL)
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index fc6e32ada5..a529ad24c7 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -2015,7 +2015,7 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
case VERIFY_10:
case VERIFY_12:
case VERIFY_16:
- DPRINTF("Verify (bytchk %lu)\n", (r->req.buf[1] >> 1) & 3);
+ DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
if (req->cmd.buf[1] & 6) {
goto illegal_request;
}
@@ -2027,7 +2027,8 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
(long)r->req.cmd.xfer);
break;
default:
- DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]);
+ DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
+ scsi_command_name(buf[0]));
scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
return 0;
}
@@ -2526,7 +2527,7 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
* ones (such as WRITE SAME or EXTENDED COPY, etc.). So, without
* O_DIRECT everything must go through SG_IO.
*/
- if (bdrv_get_flags(s->qdev.conf.bs) & BDRV_O_NOCACHE) {
+ if (!(bdrv_get_flags(s->qdev.conf.bs) & BDRV_O_NOCACHE)) {
break;
}
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index f96b7af791..048cfc7b05 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -799,8 +799,9 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, req);
n = scsi_req_enqueue(req->sreq);
- DPRINTF("VSCSI: Queued command tag 0x%x CMD 0x%x LUN %d ret: %d\n",
- req->qtag, srp->cmd.cdb[0], lun, n);
+ DPRINTF("VSCSI: Queued command tag 0x%x CMD 0x%x=%s LUN %d ret: %d\n",
+ req->qtag, srp->cmd.cdb[0], scsi_command_name(srp->cmd.cdb[0]),
+ lun, n);
if (n) {
/* Transfer direction must be set before preprocessing the
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 668bafa72a..cc8df4ef03 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -196,6 +196,10 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
}
}
+static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+}
+
static void vhost_scsi_realize(DeviceState *dev, Error **errp)
{
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
@@ -217,7 +221,9 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
}
}
- virtio_scsi_common_realize(dev, &err);
+ virtio_scsi_common_realize(dev, &err, vhost_dummy_handle_output,
+ vhost_dummy_handle_output,
+ vhost_dummy_handle_output);
if (err != NULL) {
error_propagate(errp, err);
return;
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index b39880a9cd..8c8c9d1f61 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -15,6 +15,7 @@
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
+#include "qemu/iov.h"
#include <hw/scsi/scsi.h>
#include <block/scsi.h>
#include <hw/virtio/virtio-bus.h>
@@ -25,21 +26,28 @@ typedef struct VirtIOSCSIReq {
VirtQueueElement elem;
QEMUSGList qsgl;
SCSIRequest *sreq;
+ size_t resp_size;
+ enum SCSIXferMode mode;
+ QEMUIOVector resp_iov;
union {
- char *buf;
- VirtIOSCSICmdReq *cmd;
- VirtIOSCSICtrlTMFReq *tmf;
- VirtIOSCSICtrlANReq *an;
- } req;
- union {
- char *buf;
- VirtIOSCSICmdResp *cmd;
- VirtIOSCSICtrlTMFResp *tmf;
- VirtIOSCSICtrlANResp *an;
- VirtIOSCSIEvent *event;
+ VirtIOSCSICmdResp cmd;
+ VirtIOSCSICtrlTMFResp tmf;
+ VirtIOSCSICtrlANResp an;
+ VirtIOSCSIEvent event;
} resp;
+ union {
+ struct {
+ VirtIOSCSICmdReq cmd;
+ uint8_t cdb[];
+ } QEMU_PACKED;
+ VirtIOSCSICtrlTMFReq tmf;
+ VirtIOSCSICtrlANReq an;
+ } req;
} VirtIOSCSIReq;
+QEMU_BUILD_BUG_ON(offsetof(VirtIOSCSIReq, req.cdb) !=
+ offsetof(VirtIOSCSIReq, req.cmd) + sizeof(VirtIOSCSICmdReq));
+
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
return ((lun[2] << 8) | lun[3]) & 0x3FFF;
@@ -56,18 +64,41 @@ static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)
return scsi_device_find(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}
+static VirtIOSCSIReq *virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq)
+{
+ VirtIOSCSIReq *req;
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+
+ req = g_malloc0(sizeof(*req) + vs->cdb_size);
+
+ req->vq = vq;
+ req->dev = s;
+ req->sreq = NULL;
+ qemu_sglist_init(&req->qsgl, DEVICE(s), 8, &address_space_memory);
+ qemu_iovec_init(&req->resp_iov, 1);
+ return req;
+}
+
+static void virtio_scsi_free_req(VirtIOSCSIReq *req)
+{
+ qemu_iovec_destroy(&req->resp_iov);
+ qemu_sglist_destroy(&req->qsgl);
+ g_free(req);
+}
+
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
VirtIOSCSI *s = req->dev;
VirtQueue *vq = req->vq;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
- virtqueue_push(vq, &req->elem, req->qsgl.size + req->elem.in_sg[0].iov_len);
- qemu_sglist_destroy(&req->qsgl);
+
+ qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
+ virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
if (req->sreq) {
req->sreq->hba_private = NULL;
scsi_req_unref(req->sreq);
}
- g_free(req);
+ virtio_scsi_free_req(req);
virtio_notify(vdev, vq);
}
@@ -77,50 +108,73 @@ static void virtio_scsi_bad_req(void)
exit(1);
}
-static void qemu_sgl_init_external(VirtIOSCSIReq *req, struct iovec *sg,
- hwaddr *addr, int num)
+static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
+ hwaddr *addr, int num, size_t skip)
{
QEMUSGList *qsgl = &req->qsgl;
-
- qemu_sglist_init(qsgl, DEVICE(req->dev), num, &address_space_memory);
- while (num--) {
- qemu_sglist_add(qsgl, *(addr++), (sg++)->iov_len);
+ size_t copied = 0;
+
+ while (num) {
+ if (skip >= iov->iov_len) {
+ skip -= iov->iov_len;
+ } else {
+ qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
+ copied += iov->iov_len - skip;
+ skip = 0;
+ }
+ iov++;
+ addr++;
+ num--;
}
+
+ assert(skip == 0);
+ return copied;
}
-static void virtio_scsi_parse_req(VirtIOSCSI *s, VirtQueue *vq,
- VirtIOSCSIReq *req)
+static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
+ unsigned req_size, unsigned resp_size)
{
- assert(req->elem.in_num);
- req->vq = vq;
- req->dev = s;
- req->sreq = NULL;
- if (req->elem.out_num) {
- req->req.buf = req->elem.out_sg[0].iov_base;
+ size_t in_size, out_size;
+
+ if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
+ &req->req, req_size) < req_size) {
+ return -EINVAL;
}
- req->resp.buf = req->elem.in_sg[0].iov_base;
- if (req->elem.out_num > 1) {
- qemu_sgl_init_external(req, &req->elem.out_sg[1],
- &req->elem.out_addr[1],
- req->elem.out_num - 1);
- } else {
- qemu_sgl_init_external(req, &req->elem.in_sg[1],
- &req->elem.in_addr[1],
- req->elem.in_num - 1);
+ if (qemu_iovec_concat_iov(&req->resp_iov,
+ req->elem.in_sg, req->elem.in_num, 0,
+ resp_size) < resp_size) {
+ return -EINVAL;
+ }
+ req->resp_size = resp_size;
+
+ out_size = qemu_sgl_concat(req, req->elem.out_sg,
+ &req->elem.out_addr[0], req->elem.out_num,
+ req_size);
+ in_size = qemu_sgl_concat(req, req->elem.in_sg,
+ &req->elem.in_addr[0], req->elem.in_num,
+ resp_size);
+
+ if (out_size && in_size) {
+ return -ENOTSUP;
+ }
+
+ if (out_size) {
+ req->mode = SCSI_XFER_TO_DEV;
+ } else if (in_size) {
+ req->mode = SCSI_XFER_FROM_DEV;
}
+
+ return 0;
}
static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
- VirtIOSCSIReq *req;
- req = g_malloc(sizeof(*req));
+ VirtIOSCSIReq *req = virtio_scsi_init_req(s, vq);
if (!virtqueue_pop(vq, &req->elem)) {
- g_free(req);
+ virtio_scsi_free_req(req);
return NULL;
}
-
- virtio_scsi_parse_req(s, vq, req);
return req;
}
@@ -143,9 +197,9 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
VirtIOSCSIReq *req;
uint32_t n;
- req = g_malloc(sizeof(*req));
qemu_get_be32s(f, &n);
assert(n < vs->conf.num_queues);
+ req = virtio_scsi_init_req(s, vs->cmd_vqs[n]);
qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
/* TODO: add a way for SCSIBusInfo's load_request to fail,
* and fail migration instead of asserting here.
@@ -156,41 +210,44 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
#endif
assert(req->elem.in_num <= ARRAY_SIZE(req->elem.in_sg));
assert(req->elem.out_num <= ARRAY_SIZE(req->elem.out_sg));
- virtio_scsi_parse_req(s, vs->cmd_vqs[n], req);
+
+ if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
+ sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
+ error_report("invalid SCSI request migration data");
+ exit(1);
+ }
scsi_req_ref(sreq);
req->sreq = sreq;
if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
- int req_mode =
- (req->elem.in_num > 1 ? SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV);
-
- assert(req->sreq->cmd.mode == req_mode);
+ assert(req->sreq->cmd.mode == req->mode);
}
return req;
}
static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
- SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf->lun);
+ SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
SCSIRequest *r, *next;
BusChild *kid;
int target;
/* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
- req->resp.tmf->response = VIRTIO_SCSI_S_OK;
+ req->resp.tmf.response = VIRTIO_SCSI_S_OK;
- switch (req->req.tmf->subtype) {
+ tswap32s(&req->req.tmf.subtype);
+ switch (req->req.tmf.subtype) {
case VIRTIO_SCSI_T_TMF_ABORT_TASK:
case VIRTIO_SCSI_T_TMF_QUERY_TASK:
if (!d) {
goto fail;
}
- if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) {
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
goto incorrect_lun;
}
QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
VirtIOSCSIReq *cmd_req = r->hba_private;
- if (cmd_req && cmd_req->req.cmd->tag == req->req.tmf->tag) {
+ if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
break;
}
}
@@ -200,11 +257,11 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
* check for it in the loop above.
*/
assert(r->hba_private);
- if (req->req.tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
+ if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
/* "If the specified command is present in the task set, then
* return a service response set to FUNCTION SUCCEEDED".
*/
- req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
} else {
scsi_req_cancel(r);
}
@@ -215,7 +272,7 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (!d) {
goto fail;
}
- if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) {
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
goto incorrect_lun;
}
s->resetting++;
@@ -229,16 +286,16 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (!d) {
goto fail;
}
- if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) {
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
goto incorrect_lun;
}
QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
if (r->hba_private) {
- if (req->req.tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
+ if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
/* "If there is any command present in the task set, then
* return a service response set to FUNCTION SUCCEEDED".
*/
- req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
break;
} else {
scsi_req_cancel(r);
@@ -248,7 +305,7 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
break;
case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- target = req->req.tmf->lun[1];
+ target = req->req.tmf.lun[1];
s->resetting++;
QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
d = DO_UPCAST(SCSIDevice, qdev, kid->child);
@@ -261,18 +318,18 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
default:
- req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
break;
}
return;
incorrect_lun:
- req->resp.tmf->response = VIRTIO_SCSI_S_INCORRECT_LUN;
+ req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
return;
fail:
- req->resp.tmf->response = VIRTIO_SCSI_S_BAD_TARGET;
+ req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
}
static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
@@ -281,57 +338,70 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSIReq *req;
while ((req = virtio_scsi_pop_req(s, vq))) {
- int out_size, in_size;
- if (req->elem.out_num < 1 || req->elem.in_num < 1) {
+ int type;
+
+ if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
+ &type, sizeof(type)) < sizeof(type)) {
virtio_scsi_bad_req();
continue;
}
- out_size = req->elem.out_sg[0].iov_len;
- in_size = req->elem.in_sg[0].iov_len;
- if (req->req.tmf->type == VIRTIO_SCSI_T_TMF) {
- if (out_size < sizeof(VirtIOSCSICtrlTMFReq) ||
- in_size < sizeof(VirtIOSCSICtrlTMFResp)) {
+ tswap32s(&req->req.tmf.type);
+ if (req->req.tmf.type == VIRTIO_SCSI_T_TMF) {
+ if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
+ sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
virtio_scsi_bad_req();
+ } else {
+ virtio_scsi_do_tmf(s, req);
}
- virtio_scsi_do_tmf(s, req);
- } else if (req->req.tmf->type == VIRTIO_SCSI_T_AN_QUERY ||
- req->req.tmf->type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
- if (out_size < sizeof(VirtIOSCSICtrlANReq) ||
- in_size < sizeof(VirtIOSCSICtrlANResp)) {
+ } else if (req->req.tmf.type == VIRTIO_SCSI_T_AN_QUERY ||
+ req->req.tmf.type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
+ if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
+ sizeof(VirtIOSCSICtrlANResp)) < 0) {
virtio_scsi_bad_req();
+ } else {
+ req->resp.an.event_actual = 0;
+ req->resp.an.response = VIRTIO_SCSI_S_OK;
}
- req->resp.an->event_actual = 0;
- req->resp.an->response = VIRTIO_SCSI_S_OK;
}
virtio_scsi_complete_req(req);
}
}
+static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
+{
+ /* Sense data is not in req->resp and is copied separately
+ * in virtio_scsi_command_complete.
+ */
+ req->resp_size = sizeof(VirtIOSCSICmdResp);
+ virtio_scsi_complete_req(req);
+}
+
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
size_t resid)
{
VirtIOSCSIReq *req = r->hba_private;
- VirtIOSCSI *s = req->dev;
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ uint8_t sense[SCSI_SENSE_BUF_SIZE];
uint32_t sense_len;
if (r->io_canceled) {
return;
}
- req->resp.cmd->response = VIRTIO_SCSI_S_OK;
- req->resp.cmd->status = status;
- if (req->resp.cmd->status == GOOD) {
- req->resp.cmd->resid = tswap32(resid);
+ req->resp.cmd.response = VIRTIO_SCSI_S_OK;
+ req->resp.cmd.status = status;
+ if (req->resp.cmd.status == GOOD) {
+ req->resp.cmd.resid = tswap32(resid);
} else {
- req->resp.cmd->resid = 0;
- sense_len = scsi_req_get_sense(r, req->resp.cmd->sense,
- vs->sense_size);
- req->resp.cmd->sense_len = tswap32(sense_len);
+ req->resp.cmd.resid = 0;
+ sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
+ sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
+ qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
+ &req->resp, sense_len);
+ req->resp.cmd.sense_len = tswap32(sense_len);
}
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_cmd_req(req);
}
static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
@@ -349,17 +419,17 @@ static void virtio_scsi_request_cancelled(SCSIRequest *r)
return;
}
if (req->dev->resetting) {
- req->resp.cmd->response = VIRTIO_SCSI_S_RESET;
+ req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
} else {
- req->resp.cmd->response = VIRTIO_SCSI_S_ABORTED;
+ req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
}
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_cmd_req(req);
}
static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
- req->resp.cmd->response = VIRTIO_SCSI_S_FAILURE;
- virtio_scsi_complete_req(req);
+ req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
+ virtio_scsi_complete_cmd_req(req);
}
static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
@@ -373,43 +443,35 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
while ((req = virtio_scsi_pop_req(s, vq))) {
SCSIDevice *d;
- int out_size, in_size;
- if (req->elem.out_num < 1 || req->elem.in_num < 1) {
- virtio_scsi_bad_req();
- }
+ int rc;
- out_size = req->elem.out_sg[0].iov_len;
- in_size = req->elem.in_sg[0].iov_len;
- if (out_size < sizeof(VirtIOSCSICmdReq) + vs->cdb_size ||
- in_size < sizeof(VirtIOSCSICmdResp) + vs->sense_size) {
- virtio_scsi_bad_req();
- }
-
- if (req->elem.out_num > 1 && req->elem.in_num > 1) {
- virtio_scsi_fail_cmd_req(req);
+ rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
+ sizeof(VirtIOSCSICmdResp) + vs->sense_size);
+ if (rc < 0) {
+ if (rc == -ENOTSUP) {
+ virtio_scsi_fail_cmd_req(req);
+ } else {
+ virtio_scsi_bad_req();
+ }
continue;
}
- d = virtio_scsi_device_find(s, req->req.cmd->lun);
+ d = virtio_scsi_device_find(s, req->req.cmd.lun);
if (!d) {
- req->resp.cmd->response = VIRTIO_SCSI_S_BAD_TARGET;
- virtio_scsi_complete_req(req);
+ req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ virtio_scsi_complete_cmd_req(req);
continue;
}
- req->sreq = scsi_req_new(d, req->req.cmd->tag,
- virtio_scsi_get_lun(req->req.cmd->lun),
- req->req.cmd->cdb, req);
-
- if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
- int req_mode =
- (req->elem.in_num > 1 ? SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV);
-
- if (req->sreq->cmd.mode != req_mode ||
- req->sreq->cmd.xfer > req->qsgl.size) {
- req->resp.cmd->response = VIRTIO_SCSI_S_OVERRUN;
- virtio_scsi_complete_req(req);
- continue;
- }
+ req->sreq = scsi_req_new(d, req->req.cmd.tag,
+ virtio_scsi_get_lun(req->req.cmd.lun),
+ req->req.cdb, req);
+
+ if (req->sreq->cmd.mode != SCSI_XFER_NONE
+ && (req->sreq->cmd.mode != req->mode ||
+ req->sreq->cmd.xfer > req->qsgl.size)) {
+ req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
+ virtio_scsi_complete_cmd_req(req);
+ continue;
}
n = scsi_req_enqueue(req->sreq);
@@ -513,7 +575,7 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
return;
}
- if (req->elem.out_num || req->elem.in_num != 1) {
+ if (req->elem.out_num) {
virtio_scsi_bad_req();
}
@@ -522,12 +584,12 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
s->events_dropped = false;
}
- in_size = req->elem.in_sg[0].iov_len;
+ in_size = iov_size(req->elem.in_sg, req->elem.in_num);
if (in_size < sizeof(VirtIOSCSIEvent)) {
virtio_scsi_bad_req();
}
- evt = req->resp.event;
+ evt = &req->resp.event;
memset(evt, 0, sizeof(VirtIOSCSIEvent));
evt->event = event;
evt->reason = reason;
@@ -605,7 +667,9 @@ static struct SCSIBusInfo virtio_scsi_scsi_info = {
.load_request = virtio_scsi_load_request,
};
-void virtio_scsi_common_realize(DeviceState *dev, Error **errp)
+void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
+ HandleOutput ctrl, HandleOutput evt,
+ HandleOutput cmd)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
@@ -619,12 +683,12 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp)
s->cdb_size = VIRTIO_SCSI_CDB_SIZE;
s->ctrl_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE,
- virtio_scsi_handle_ctrl);
+ ctrl);
s->event_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE,
- virtio_scsi_handle_event);
+ evt);
for (i = 0; i < s->conf.num_queues; i++) {
s->cmd_vqs[i] = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE,
- virtio_scsi_handle_cmd);
+ cmd);
}
}
@@ -635,7 +699,9 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
static int virtio_scsi_id;
Error *err = NULL;
- virtio_scsi_common_realize(dev, &err);
+ virtio_scsi_common_realize(dev, &err, virtio_scsi_handle_ctrl,
+ virtio_scsi_handle_event,
+ virtio_scsi_handle_cmd);
if (err != NULL) {
error_propagate(errp, err);
return;
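Editor's note: the virtio-scsi.c rework above stops assuming that header, CDB, response and payload each occupy their own descriptor: virtio_scsi_parse_req() copies the fixed-size header out with iov_to_buf(), and qemu_sgl_concat() then walks the whole iovec array, skipping the header bytes and adding only the payload to the scatter/gather list. Below is a standalone sketch of that skip-and-count walk; iov_payload_size() and the 19-byte header size are illustrative stand-ins, not the QEMU code itself.

/* Standalone sketch: skip the first 'skip' header bytes of an iovec
 * array and count how many payload bytes remain. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/uio.h>

static size_t iov_payload_size(const struct iovec *iov, int num, size_t skip)
{
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;           /* this element is all header */
        } else {
            copied += iov->iov_len - skip;  /* rest of element is payload */
            skip = 0;
        }
        iov++;
        num--;
    }
    assert(skip == 0);                      /* header must fit inside the iovec */
    return copied;
}

int main(void)
{
    char hdr[19], data1[512], data2[512];
    struct iovec out_sg[] = {
        { .iov_base = hdr,   .iov_len = sizeof(hdr)   },
        { .iov_base = data1, .iov_len = sizeof(data1) },
        { .iov_base = data2, .iov_len = sizeof(data2) },
    };

    /* Skip a 19-byte header (stand-in for VirtIOSCSICmdReq) and count
     * the remaining data-out payload. */
    printf("payload bytes: %zu\n",
           iov_payload_size(out_sg, 3, sizeof(hdr)));
    return 0;
}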
diff --git a/include/block/scsi.h b/include/block/scsi.h
index 9ab045b613..edde960d18 100644
--- a/include/block/scsi.h
+++ b/include/block/scsi.h
@@ -143,6 +143,8 @@
#define READ_CD 0xbe
#define SEND_DVD_STRUCTURE 0xbf
+const char *scsi_command_name(uint8_t cmd);
+
/*
* SERVICE ACTION IN subcodes
*/
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index fa9d99792a..ca7a0bdd1a 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -268,6 +268,10 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
#define PC_COMPAT_2_0 \
{\
+ .driver = "virtio-scsi-pci",\
+ .property = "any_layout",\
+ .value = "off",\
+ },{\
.driver = "apic",\
.property = "version",\
.value = stringify(0x11),\
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 42b102487a..a8f618578b 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -84,14 +84,13 @@
#define VIRTIO_SCSI_EVT_RESET_RESCAN 1
#define VIRTIO_SCSI_EVT_RESET_REMOVED 2
-/* SCSI command request, followed by data-out */
+/* SCSI command request, followed by CDB and data-out */
typedef struct {
uint8_t lun[8]; /* Logical Unit Number */
uint64_t tag; /* Command identifier */
uint8_t task_attr; /* Task attribute */
uint8_t prio;
uint8_t crn;
- uint8_t cdb[];
} QEMU_PACKED VirtIOSCSICmdReq;
/* Response, followed by sense data and data-in */
@@ -101,7 +100,6 @@ typedef struct {
uint16_t status_qualifier; /* Status qualifier */
uint8_t status; /* Command completion status */
uint8_t response; /* Response values */
- uint8_t sense[];
} QEMU_PACKED VirtIOSCSICmdResp;
/* Task Management Request */
@@ -186,7 +184,12 @@ typedef struct {
DEFINE_PROP_BIT("param_change", _state, _feature_field, \
VIRTIO_SCSI_F_CHANGE, true)
-void virtio_scsi_common_realize(DeviceState *dev, Error **errp);
+typedef void (*HandleOutput)(VirtIODevice *, VirtQueue *);
+
+void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
+ HandleOutput ctrl, HandleOutput evt,
+ HandleOutput cmd);
+
void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp);
#endif /* _QEMU_VIRTIO_SCSI_H */
diff --git a/include/qemu-common.h b/include/qemu-common.h
index 66ceceb2ad..ae76197532 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -315,9 +315,9 @@ void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
void qemu_iovec_concat(QEMUIOVector *dst,
QEMUIOVector *src, size_t soffset, size_t sbytes);
-void qemu_iovec_concat_iov(QEMUIOVector *dst,
- struct iovec *src_iov, unsigned int src_cnt,
- size_t soffset, size_t sbytes);
+size_t qemu_iovec_concat_iov(QEMUIOVector *dst,
+ struct iovec *src_iov, unsigned int src_cnt,
+ size_t soffset, size_t sbytes);
bool qemu_iovec_is_zero(QEMUIOVector *qiov);
void qemu_iovec_destroy(QEMUIOVector *qiov);
void qemu_iovec_reset(QEMUIOVector *qiov);
diff --git a/include/qemu/aes.h b/include/qemu/aes.h
index c10666059f..a006da2224 100644
--- a/include/qemu/aes.h
+++ b/include/qemu/aes.h
@@ -10,6 +10,15 @@ struct aes_key_st {
};
typedef struct aes_key_st AES_KEY;
+/* FreeBSD has its own AES_set_decrypt_key in -lcrypto, avoid conflicts */
+#ifdef __FreeBSD__
+#define AES_set_encrypt_key QEMU_AES_set_encrypt_key
+#define AES_set_decrypt_key QEMU_AES_set_decrypt_key
+#define AES_encrypt QEMU_AES_encrypt
+#define AES_decrypt QEMU_AES_decrypt
+#define AES_cbc_encrypt QEMU_AES_cbc_encrypt
+#endif
+
int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
AES_KEY *key);
int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 16cebbe16d..34ae3c2857 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -911,12 +911,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
break;
}
- /* 32-bit ops (non 64-bit ops and non load/store ops) generate
- 32-bit results. For the result is zero test below, we can
- ignore high bits, but for further optimizations we need to
- record that the high bits contain garbage. */
+ /* 32-bit ops generate 32-bit results. For the result is zero test
+ below, we can ignore high bits, but for further optimizations we
+ need to record that the high bits contain garbage. */
partmask = mask;
- if (!(def->flags & (TCG_OPF_CALL_CLOBBER | TCG_OPF_64BIT))) {
+ if (!(def->flags & TCG_OPF_64BIT)) {
mask |= ~(tcg_target_ulong)0xffffffffu;
partmask &= 0xffffffffu;
affected &= 0xffffffffu;
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 03040b157f..66e25788ce 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -534,7 +534,7 @@ static void interface_release_resource(QXLInstance *sin,
QXLCommandExt *ext;
dprint(2, "%s/%d:\n", __func__, ssd->qxl.id);
- ext = (void *)(rext.info->id);
+ ext = (void *)(intptr_t)(rext.info->id);
switch (ext->cmd.type) {
case QXL_CMD_DRAW:
update = container_of(ext, SimpleSpiceUpdate, ext);
diff --git a/util/iov.c b/util/iov.c
index 49f88388f8..2b4f46da75 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -295,15 +295,15 @@ void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
* of src".
* Only vector pointers are processed, not the actual data buffers.
*/
-void qemu_iovec_concat_iov(QEMUIOVector *dst,
- struct iovec *src_iov, unsigned int src_cnt,
- size_t soffset, size_t sbytes)
+size_t qemu_iovec_concat_iov(QEMUIOVector *dst,
+ struct iovec *src_iov, unsigned int src_cnt,
+ size_t soffset, size_t sbytes)
{
int i;
size_t done;
if (!sbytes) {
- return;
+ return 0;
}
assert(dst->nalloc != -1);
for (i = 0, done = 0; done < sbytes && i < src_cnt; i++) {
@@ -317,6 +317,8 @@ void qemu_iovec_concat_iov(QEMUIOVector *dst,
}
}
assert(soffset == 0); /* offset beyond end of src */
+
+ return done;
}
/*
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index 69552f7ec3..157f10fab2 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -24,6 +24,10 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
+ *
+ * The implementation of g_poll (functions poll_rest, g_poll) at the end of
+ * this file are based on code from GNOME glib-2 and use a different license,
+ * see the license comment there.
*/
#include <windows.h>
#include <glib.h>
@@ -138,7 +142,7 @@ int inet_aton(const char *cp, struct in_addr *ia)
{
uint32_t addr = inet_addr(cp);
if (addr == 0xffffffff) {
- return 0;
+ return 0;
}
ia->s_addr = addr;
return 1;
@@ -240,113 +244,205 @@ char *qemu_get_exec_dir(void)
}
/*
- * g_poll has a problem on Windows when using
- * timeouts < 10ms, in glib/gpoll.c:
+ * The original implementation of g_poll from glib has a problem on Windows
+ * when using timeouts < 10 ms.
*
- * // If not, and we have a significant timeout, poll again with
- * // timeout then. Note that this will return indication for only
- * // one event, or only for messages. We ignore timeouts less than
- * // ten milliseconds as they are mostly pointless on Windows, the
- * // MsgWaitForMultipleObjectsEx() call will timeout right away
- * // anyway.
+ * Whenever g_poll is called with timeout < 10 ms, it does a quick poll instead
+ * of wait. This causes significant performance degradation of QEMU.
*
- * if (retval == 0 && (timeout == INFINITE || timeout >= 10))
- * retval = poll_rest (poll_msgs, handles, nhandles, fds, nfds, timeout);
+ * The following code is a copy of the original code from glib/gpoll.c
+ * (glib commit 20f4d1820b8d4d0fc4447188e33efffd6d4a88d8 from 2014-02-19).
+ * Some debug code was removed and the code was reformatted.
+ * All other code modifications are marked with 'QEMU'.
+ */
+
+/*
+ * gpoll.c: poll(2) abstraction
+ * Copyright 1998 Owen Taylor
+ * Copyright 2008 Red Hat, Inc.
*
- * So whenever g_poll is called with timeout < 10ms it does
- * a quick poll instead of wait, this causes significant performance
- * degradation of QEMU, thus we should use WaitForMultipleObjectsEx
- * directly
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-gint g_poll_fixed(GPollFD *fds, guint nfds, gint timeout)
+
+static int poll_rest(gboolean poll_msgs, HANDLE *handles, gint nhandles,
+ GPollFD *fds, guint nfds, gint timeout)
{
- guint i;
- HANDLE handles[MAXIMUM_WAIT_OBJECTS];
- gint nhandles = 0;
- int num_completed = 0;
+ DWORD ready;
+ GPollFD *f;
+ int recursed_result;
- for (i = 0; i < nfds; i++) {
- gint j;
+ if (poll_msgs) {
+ /* Wait for either messages or handles
+ * -> Use MsgWaitForMultipleObjectsEx
+ */
+ ready = MsgWaitForMultipleObjectsEx(nhandles, handles, timeout,
+ QS_ALLINPUT, MWMO_ALERTABLE);
- if (fds[i].fd <= 0) {
- continue;
+ if (ready == WAIT_FAILED) {
+ gchar *emsg = g_win32_error_message(GetLastError());
+ g_warning("MsgWaitForMultipleObjectsEx failed: %s", emsg);
+ g_free(emsg);
}
-
- /* don't add same handle several times
+ } else if (nhandles == 0) {
+ /* No handles to wait for, just the timeout */
+ if (timeout == INFINITE) {
+ ready = WAIT_FAILED;
+ } else {
+ SleepEx(timeout, TRUE);
+ ready = WAIT_TIMEOUT;
+ }
+ } else {
+ /* Wait for just handles
+ * -> Use WaitForMultipleObjectsEx
*/
- for (j = 0; j < nhandles; j++) {
- if (handles[j] == (HANDLE)fds[i].fd) {
- break;
- }
+ ready =
+ WaitForMultipleObjectsEx(nhandles, handles, FALSE, timeout, TRUE);
+ if (ready == WAIT_FAILED) {
+ gchar *emsg = g_win32_error_message(GetLastError());
+ g_warning("WaitForMultipleObjectsEx failed: %s", emsg);
+ g_free(emsg);
}
+ }
- if (j == nhandles) {
- if (nhandles == MAXIMUM_WAIT_OBJECTS) {
- fprintf(stderr, "Too many handles to wait for!\n");
- break;
- } else {
- handles[nhandles++] = (HANDLE)fds[i].fd;
+ if (ready == WAIT_FAILED) {
+ return -1;
+ } else if (ready == WAIT_TIMEOUT || ready == WAIT_IO_COMPLETION) {
+ return 0;
+ } else if (poll_msgs && ready == WAIT_OBJECT_0 + nhandles) {
+ for (f = fds; f < &fds[nfds]; ++f) {
+ if (f->fd == G_WIN32_MSG_HANDLE && f->events & G_IO_IN) {
+ f->revents |= G_IO_IN;
}
}
- }
- for (i = 0; i < nfds; ++i) {
- fds[i].revents = 0;
- }
+ /* If we have a timeout, or no handles to poll, be satisfied
+ * with just noticing we have messages waiting.
+ */
+ if (timeout != 0 || nhandles == 0) {
+ return 1;
+ }
- if (timeout == -1) {
- timeout = INFINITE;
- }
+ /* If no timeout and handles to poll, recurse to poll them,
+ * too.
+ */
+ recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
+ return (recursed_result == -1) ? -1 : 1 + recursed_result;
+ } else if (/* QEMU: removed the following unneeded statement which causes
+ * a compiler warning: ready >= WAIT_OBJECT_0 && */
+ ready < WAIT_OBJECT_0 + nhandles) {
+ for (f = fds; f < &fds[nfds]; ++f) {
+ if ((HANDLE) f->fd == handles[ready - WAIT_OBJECT_0]) {
+ f->revents = f->events;
+ }
+ }
- if (nhandles == 0) {
- if (timeout == INFINITE) {
- return -1;
- } else {
- SleepEx(timeout, TRUE);
- return 0;
+ /* If no timeout and polling several handles, recurse to poll
+ * the rest of them.
+ */
+ if (timeout == 0 && nhandles > 1) {
+ /* Remove the handle that fired */
+ int i;
+ if (ready < nhandles - 1) {
+ for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
+ handles[i-1] = handles[i];
+ }
+ }
+ nhandles--;
+ recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
+ return (recursed_result == -1) ? -1 : 1 + recursed_result;
}
+ return 1;
}
- while (1) {
- DWORD res;
- gint j;
-
- res = WaitForMultipleObjectsEx(nhandles, handles, FALSE,
- timeout, TRUE);
+ return 0;
+}
- if (res == WAIT_FAILED) {
- for (i = 0; i < nfds; ++i) {
- fds[i].revents = 0;
+gint g_poll(GPollFD *fds, guint nfds, gint timeout)
+{
+ HANDLE handles[MAXIMUM_WAIT_OBJECTS];
+ gboolean poll_msgs = FALSE;
+ GPollFD *f;
+ gint nhandles = 0;
+ int retval;
+
+ for (f = fds; f < &fds[nfds]; ++f) {
+ if (f->fd == G_WIN32_MSG_HANDLE && (f->events & G_IO_IN)) {
+ poll_msgs = TRUE;
+ } else if (f->fd > 0) {
+ /* Don't add the same handle several times into the array, as
+ * docs say that is not allowed, even if it actually does seem
+ * to work.
+ */
+ gint i;
+
+ for (i = 0; i < nhandles; i++) {
+ if (handles[i] == (HANDLE) f->fd) {
+ break;
+ }
}
- return -1;
- } else if ((res == WAIT_TIMEOUT) || (res == WAIT_IO_COMPLETION) ||
- ((int)res < (int)WAIT_OBJECT_0) ||
- (res >= (WAIT_OBJECT_0 + nhandles))) {
- break;
- }
-
- for (i = 0; i < nfds; ++i) {
- if (handles[res - WAIT_OBJECT_0] == (HANDLE)fds[i].fd) {
- fds[i].revents = fds[i].events;
+ if (i == nhandles) {
+ if (nhandles == MAXIMUM_WAIT_OBJECTS) {
+ g_warning("Too many handles to wait for!\n");
+ break;
+ } else {
+ handles[nhandles++] = (HANDLE) f->fd;
+ }
}
}
+ }
- ++num_completed;
+ for (f = fds; f < &fds[nfds]; ++f) {
+ f->revents = 0;
+ }
- if (nhandles <= 1) {
- break;
- }
+ if (timeout == -1) {
+ timeout = INFINITE;
+ }
- /* poll the rest of the handles
+ /* Polling for several things? */
+ if (nhandles > 1 || (nhandles > 0 && poll_msgs)) {
+ /* First check if one or several of them are immediately
+ * available
+ */
+ retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, 0);
+
+ /* If not, and we have a significant timeout, poll again with
+ * timeout then. Note that this will return indication for only
+ * one event, or only for messages. We ignore timeouts less than
+ * ten milliseconds as they are mostly pointless on Windows, the
+ * MsgWaitForMultipleObjectsEx() call will timeout right away
+ * anyway.
+ *
+ * Modification for QEMU: replaced timeout >= 10 by timeout > 0.
*/
- for (j = res - WAIT_OBJECT_0 + 1; j < nhandles; j++) {
- handles[j - 1] = handles[j];
+ if (retval == 0 && (timeout == INFINITE || timeout > 0)) {
+ retval = poll_rest(poll_msgs, handles, nhandles,
+ fds, nfds, timeout);
}
- --nhandles;
+ } else {
+ /* Just polling for one thing, so no need to check first if
+ * available immediately
+ */
+ retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, timeout);
+ }
- timeout = 0;
+ if (retval == -1) {
+ for (f = fds; f < &fds[nfds]; ++f) {
+ f->revents = 0;
+ }
}
- return num_completed;
+ return retval;
}