-rw-r--r--  Makefile.target | 2
-rw-r--r--  block.c | 138
-rw-r--r--  block/Makefile.objs | 2
-rw-r--r--  block/accounting.c | 7
-rw-r--r--  block/block-backend.c | 229
-rw-r--r--  block/dmg.c | 502
-rw-r--r--  block/nbd-client.c | 128
-rw-r--r--  block/nbd-client.h | 34
-rw-r--r--  block/nbd.c | 44
-rw-r--r--  block/qapi.c | 5
-rw-r--r--  block/qcow.c | 2
-rw-r--r--  block/qcow2-cache.c | 4
-rw-r--r--  block/qcow2-refcount.c | 78
-rw-r--r--  block/qcow2.c | 8
-rw-r--r--  block/qed.c | 5
-rw-r--r--  block/qed.h | 1
-rw-r--r--  block/raw-posix.c | 142
-rw-r--r--  block/raw-win32.c | 2
-rw-r--r--  block/sheepdog.c | 4
-rw-r--r--  block/vmdk.c | 3
-rw-r--r--  block/write-threshold.c | 125
-rw-r--r--  blockdev.c | 136
-rwxr-xr-x  configure | 50
-rw-r--r--  cpu-exec.c | 12
-rw-r--r--  cpus.c | 4
-rw-r--r--  default-configs/alpha-softmmu.mak | 2
-rw-r--r--  default-configs/arm-softmmu.mak | 2
-rw-r--r--  default-configs/i386-softmmu.mak | 2
-rw-r--r--  default-configs/mips-softmmu.mak | 2
-rw-r--r--  default-configs/mips64-softmmu.mak | 2
-rw-r--r--  default-configs/mips64el-softmmu.mak | 2
-rw-r--r--  default-configs/mipsel-softmmu.mak | 2
-rw-r--r--  default-configs/pci.mak | 2
-rw-r--r--  default-configs/ppc-softmmu.mak | 2
-rw-r--r--  default-configs/ppc64-softmmu.mak | 2
-rw-r--r--  default-configs/ppcemb-softmmu.mak | 2
-rw-r--r--  default-configs/sparc64-softmmu.mak | 2
-rw-r--r--  default-configs/x86_64-softmmu.mak | 2
-rw-r--r--  disas/sh4.c | 2
-rw-r--r--  fsdev/virtfs-proxy-helper.c | 13
-rw-r--r--  hmp.c | 15
-rw-r--r--  hw/alpha/typhoon.c | 2
-rw-r--r--  hw/arm/virt.c | 158
-rw-r--r--  hw/block/dataplane/virtio-blk.c | 12
-rw-r--r--  hw/block/onenand.c | 8
-rw-r--r--  hw/block/virtio-blk.c | 299
-rw-r--r--  hw/block/xen_disk.c | 27
-rw-r--r--  hw/char/serial.c | 14
-rw-r--r--  hw/char/virtio-serial-bus.c | 2
-rw-r--r--  hw/core/fw-path-provider.c | 2
-rw-r--r--  hw/display/cirrus_vga.c | 2
-rw-r--r--  hw/display/vga-isa.c | 2
-rw-r--r--  hw/display/vga.c | 5
-rw-r--r--  hw/display/xenfb.c | 5
-rw-r--r--  hw/i386/pc_piix.c | 2
-rw-r--r--  hw/ide/atapi.c | 17
-rw-r--r--  hw/ide/core.c | 1
-rw-r--r--  hw/ide/internal.h | 2
-rw-r--r--  hw/ide/pci.c | 11
-rw-r--r--  hw/input/ps2.c | 16
-rw-r--r--  hw/isa/i82378.c | 3
-rw-r--r--  hw/isa/isa-bus.c | 12
-rw-r--r--  hw/isa/lpc_ich9.c | 2
-rw-r--r--  hw/isa/piix4.c | 3
-rw-r--r--  hw/isa/vt82c686.c | 3
-rw-r--r--  hw/mips/gt64xxx_pci.c | 95
-rw-r--r--  hw/mips/mips_jazz.c | 44
-rw-r--r--  hw/mips/mips_r4k.c | 19
-rw-r--r--  hw/net/pcnet-pci.c | 49
-rw-r--r--  hw/net/pcnet.c | 28
-rw-r--r--  hw/net/rtl8139.c | 14
-rw-r--r--  hw/pci-host/Makefile.objs | 1
-rw-r--r--  hw/pci-host/gpex.c | 154
-rw-r--r--  hw/pci-host/piix.c | 3
-rw-r--r--  hw/ppc/spapr.c | 2
-rw-r--r--  hw/ppc/spapr_vio.c | 2
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c | 2
-rw-r--r--  hw/sparc64/sun4u.c | 3
-rw-r--r--  hw/usb/desc-msos.c | 2
-rw-r--r--  hw/virtio/Makefile.objs | 2
-rw-r--r--  hw/virtio/dataplane/Makefile.objs | 2
-rw-r--r--  hw/virtio/dataplane/vring.c | 53
-rw-r--r--  include/block/accounting.h | 3
-rw-r--r--  include/block/block.h | 7
-rw-r--r--  include/block/block_int.h | 7
-rw-r--r--  include/block/nbd.h | 3
-rw-r--r--  include/block/write-threshold.h | 64
-rw-r--r--  include/exec/gen-icount.h | 22
-rw-r--r--  include/hw/isa/isa.h | 6
-rw-r--r--  include/hw/pci-host/gpex.h | 56
-rw-r--r--  include/hw/pci/pci.h | 1
-rw-r--r--  include/hw/virtio/dataplane/vring-accessors.h | 75
-rw-r--r--  include/hw/virtio/dataplane/vring.h | 14
-rw-r--r--  include/hw/virtio/virtio-blk.h | 18
-rw-r--r--  include/migration/vmstate.h | 2
-rw-r--r--  include/qapi/qmp/qerror.h | 9
-rw-r--r--  include/qemu-io.h | 4
-rw-r--r--  include/sysemu/block-backend.h | 14
-rw-r--r--  include/sysemu/device_tree.h | 9
-rw-r--r--  include/ui/console.h | 1
-rw-r--r--  kvm-all.c | 4
-rw-r--r--  libcacard/Makefile | 2
-rw-r--r--  linux-user/main.c | 64
-rw-r--r--  linux-user/syscall.c | 9
-rw-r--r--  linux-user/syscall_defs.h | 95
-rw-r--r--  migration/rdma.c | 11
-rw-r--r--  monitor.c | 16
-rw-r--r--  nbd.c | 50
-rw-r--r--  qapi/block-core.json | 60
-rw-r--r--  qemu-img.c | 210
-rw-r--r--  qemu-io-cmds.c | 250
-rw-r--r--  qemu-io.c | 58
-rw-r--r--  qemu-log.c | 2
-rw-r--r--  qemu-nbd.c | 32
-rw-r--r--  qjson.c | 2
-rw-r--r--  qmp-commands.hx | 54
-rw-r--r--  qmp.c | 19
-rw-r--r--  savevm.c | 11
-rwxr-xr-x  scripts/analyze-migration.py | 2
-rw-r--r--  scripts/qtest.py | 71
-rw-r--r--  spice-qemu-char.c | 2
-rw-r--r--  stubs/qtest.c | 2
-rw-r--r--  target-alpha/translate.c | 16
-rw-r--r--  target-arm/cpu.c | 5
-rw-r--r--  target-arm/cpu.h | 2
-rw-r--r--  target-arm/cpu64.c | 39
-rw-r--r--  target-arm/helper-a64.c | 5
-rw-r--r--  target-arm/helper.c | 211
-rw-r--r--  target-arm/kvm64.c | 36
-rw-r--r--  target-arm/op_helper.c | 6
-rw-r--r--  target-arm/translate-a64.c | 28
-rw-r--r--  target-arm/translate.c | 10
-rw-r--r--  target-cris/translate.c | 15
-rw-r--r--  target-i386/translate.c | 11
-rw-r--r--  target-lm32/translate.c | 16
-rw-r--r--  target-m68k/translate.c | 10
-rw-r--r--  target-microblaze/translate.c | 22
-rw-r--r--  target-mips/machine.c | 6
-rw-r--r--  target-mips/op_helper.c | 10
-rw-r--r--  target-mips/translate.c | 25
-rw-r--r--  target-mips/translate_init.c | 4
-rw-r--r--  target-moxie/machine.c | 1
-rw-r--r--  target-moxie/translate.c | 10
-rw-r--r--  target-openrisc/translate.c | 15
-rw-r--r--  target-ppc/translate.c | 11
-rw-r--r--  target-s390x/translate.c | 11
-rw-r--r--  target-sh4/translate.c | 10
-rw-r--r--  target-sparc/translate.c | 10
-rw-r--r--  target-tricore/translate.c | 5
-rw-r--r--  target-unicore32/translate.c | 10
-rw-r--r--  target-xtensa/translate.c | 8
-rw-r--r--  tcg/optimize.c | 307
-rw-r--r--  tcg/tcg-op.c | 1934
-rw-r--r--  tcg/tcg-op.h | 2439
-rw-r--r--  tcg/tcg-opc.h | 9
-rw-r--r--  tcg/tcg.c | 532
-rw-r--r--  tcg/tcg.h | 72
-rw-r--r--  tci.c | 13
-rw-r--r--  tests/Makefile | 8
-rw-r--r--  tests/ahci-test.c | 1072
-rw-r--r--  tests/libqos/ahci.c | 838
-rw-r--r--  tests/libqos/ahci.h | 549
-rw-r--r--  tests/libqos/libqos-pc.c | 24
-rw-r--r--  tests/libqos/libqos-pc.h | 9
-rw-r--r--  tests/libqos/libqos.c | 63
-rw-r--r--  tests/libqos/libqos.h | 33
-rw-r--r--  tests/libqos/malloc-pc.c | 20
-rw-r--r--  tests/libqos/malloc.c | 91
-rw-r--r--  tests/libqos/malloc.h | 26
-rw-r--r--  tests/qemu-iotests/016.out | 23
-rwxr-xr-x  tests/qemu-iotests/051 | 1
-rw-r--r--  tests/qemu-iotests/051.out | 63
-rw-r--r--  tests/qemu-iotests/067.out | 5
-rwxr-xr-x  tests/qemu-iotests/083 | 3
-rw-r--r--  tests/qemu-iotests/083.out | 81
-rw-r--r--  tests/qemu-iotests/087.out | 8
-rwxr-xr-x  tests/qemu-iotests/093 | 114
-rw-r--r--  tests/qemu-iotests/093.out | 5
-rwxr-xr-x  tests/qemu-iotests/094 | 81
-rw-r--r--  tests/qemu-iotests/094.out | 11
-rwxr-xr-x  tests/qemu-iotests/100 | 12
-rwxr-xr-x  tests/qemu-iotests/104 | 9
-rwxr-xr-x  tests/qemu-iotests/116 | 96
-rw-r--r--  tests/qemu-iotests/116.out | 37
-rwxr-xr-x  tests/qemu-iotests/123 (renamed from tests/qemu-iotests/016) | 43
-rw-r--r--  tests/qemu-iotests/123.out | 9
-rw-r--r--  tests/qemu-iotests/common.filter | 1
-rw-r--r--  tests/qemu-iotests/common.qemu | 12
-rw-r--r--  tests/qemu-iotests/common.rc | 2
-rw-r--r--  tests/qemu-iotests/group | 6
-rw-r--r--  tests/qemu-iotests/iotests.py | 25
-rw-r--r--  tests/test-write-threshold.c | 119
-rw-r--r--  trace-events | 42
-rw-r--r--  translate-all.c | 2
-rw-r--r--  ui/vnc.c | 78
-rw-r--r--  util/aes.c | 2
-rw-r--r--  util/qemu-option.c | 8
-rw-r--r--  util/qemu-sockets.c | 26
-rw-r--r--  util/uri.c | 61
-rw-r--r--  vl.c | 7
200 files changed, 8492 insertions, 5256 deletions
diff --git a/Makefile.target b/Makefile.target
index e9ff1eed7b..58c6ae1d69 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -83,7 +83,7 @@ all: $(PROGS) stap
#########################################################
# cpu emulator library
obj-y = exec.o translate-all.o cpu-exec.o
-obj-y += tcg/tcg.o tcg/optimize.o
+obj-y += tcg/tcg.o tcg/tcg-op.o tcg/optimize.o
obj-$(CONFIG_TCG_INTERPRETER) += tci.o
obj-$(CONFIG_TCG_INTERPRETER) += disas/tci.o
obj-y += fpu/softfloat.o
diff --git a/block.c b/block.c
index d45e4ddf31..9b707e3d1b 100644
--- a/block.c
+++ b/block.c
@@ -508,9 +508,8 @@ int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
Error *local_err = NULL;
int ret;
- drv = bdrv_find_protocol(filename, true);
+ drv = bdrv_find_protocol(filename, true, errp);
if (drv == NULL) {
- error_setg(errp, "Could not find protocol for file '%s'", filename);
return -ENOENT;
}
@@ -628,7 +627,8 @@ static BlockDriver *find_hdev_driver(const char *filename)
}
BlockDriver *bdrv_find_protocol(const char *filename,
- bool allow_protocol_prefix)
+ bool allow_protocol_prefix,
+ Error **errp)
{
BlockDriver *drv1;
char protocol[128];
@@ -666,6 +666,8 @@ BlockDriver *bdrv_find_protocol(const char *filename,
return drv1;
}
}
+
+ error_setg(errp, "Unknown protocol '%s'", protocol);
return NULL;
}
@@ -970,7 +972,6 @@ static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
bs->zero_beyond_eof = true;
open_flags = bdrv_open_flags(bs, flags);
bs->read_only = !(open_flags & BDRV_O_RDWR);
- bs->growable = !!(flags & BDRV_O_PROTOCOL);
if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
error_setg(errp,
@@ -1136,9 +1137,8 @@ static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
} else {
if (!drvname && protocol) {
if (filename) {
- drv = bdrv_find_protocol(filename, parse_filename);
+ drv = bdrv_find_protocol(filename, parse_filename, errp);
if (!drv) {
- error_setg(errp, "Unknown protocol");
return -EINVAL;
}
@@ -1885,7 +1885,6 @@ void bdrv_close(BlockDriverState *bs)
bs->encrypted = 0;
bs->valid_key = 0;
bs->sg = 0;
- bs->growable = 0;
bs->zero_beyond_eof = false;
QDECREF(bs->options);
bs->options = NULL;
@@ -2645,25 +2644,17 @@ exit:
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
size_t size)
{
- int64_t len;
-
- if (size > INT_MAX) {
+ if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
return -EIO;
}
- if (!bdrv_is_inserted(bs))
+ if (!bdrv_is_inserted(bs)) {
return -ENOMEDIUM;
+ }
- if (bs->growable)
- return 0;
-
- len = bdrv_getlength(bs);
-
- if (offset < 0)
- return -EIO;
-
- if ((offset > len) || (len - offset < size))
+ if (offset < 0) {
return -EIO;
+ }
return 0;
}
@@ -2671,7 +2662,7 @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
- if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+ if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
return -EIO;
}
@@ -2758,7 +2749,7 @@ static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
.iov_len = nb_sectors * BDRV_SECTOR_SIZE,
};
- if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+ if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
return -EINVAL;
}
@@ -2826,13 +2817,10 @@ int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
}
for (;;) {
- nb_sectors = target_sectors - sector_num;
+ nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
if (nb_sectors <= 0) {
return 0;
}
- if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
- nb_sectors = INT_MAX / BDRV_SECTOR_SIZE;
- }
ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
if (ret < 0) {
error_report("error getting block status at sector %" PRId64 ": %s",
@@ -3045,10 +3033,10 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
}
/* Forward the request to the BlockDriver */
- if (!(bs->zero_beyond_eof && bs->growable)) {
+ if (!bs->zero_beyond_eof) {
ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
} else {
- /* Read zeros after EOF of growable BDSes */
+ /* Read zeros after EOF */
int64_t total_sectors, max_nb_sectors;
total_sectors = bdrv_nb_sectors(bs);
@@ -3110,8 +3098,10 @@ static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
if (!drv) {
return -ENOMEDIUM;
}
- if (bdrv_check_byte_request(bs, offset, bytes)) {
- return -EIO;
+
+ ret = bdrv_check_byte_request(bs, offset, bytes);
+ if (ret < 0) {
+ return ret;
}
if (bs->copy_on_read) {
@@ -3167,7 +3157,7 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
- if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
+ if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
return -EINVAL;
}
@@ -3192,10 +3182,7 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
BDRV_REQ_COPY_ON_READ);
}
-/* if no limit is specified in the BlockLimits use a default
- * of 32768 512-byte sectors (16 MiB) per request.
- */
-#define MAX_WRITE_ZEROES_DEFAULT 32768
+#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
@@ -3205,8 +3192,8 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
struct iovec iov = {0};
int ret = 0;
- int max_write_zeroes = bs->bl.max_write_zeroes ?
- bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
+ int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
+ BDRV_REQUEST_MAX_SECTORS);
while (nb_sectors > 0 && !ret) {
int num = nb_sectors;
@@ -3242,7 +3229,7 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
if (ret == -ENOTSUP) {
/* Fall back to bounce buffer if write zeroes is unsupported */
int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
- MAX_WRITE_ZEROES_DEFAULT);
+ MAX_WRITE_ZEROES_BOUNCE_BUFFER);
num = MIN(num, max_xfer_len);
iov.iov_len = num * BDRV_SECTOR_SIZE;
if (iov.iov_base == NULL) {
@@ -3328,7 +3315,7 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
- if (bs->growable && ret >= 0) {
+ if (ret >= 0) {
bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
}
@@ -3357,8 +3344,10 @@ static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
if (bs->read_only) {
return -EACCES;
}
- if (bdrv_check_byte_request(bs, offset, bytes)) {
- return -EIO;
+
+ ret = bdrv_check_byte_request(bs, offset, bytes);
+ if (ret < 0) {
+ return ret;
}
/* throttling disk I/O */
@@ -3461,7 +3450,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
- if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
+ if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
return -EINVAL;
}
@@ -3719,6 +3708,36 @@ int bdrv_set_key(BlockDriverState *bs, const char *key)
return ret;
}
+/*
+ * Provide an encryption key for @bs.
+ * If @key is non-null:
+ * If @bs is not encrypted, fail.
+ * Else if the key is invalid, fail.
+ * Else set @bs's key to @key, replacing the existing key, if any.
+ * If @key is null:
+ * If @bs is encrypted and still lacks a key, fail.
+ * Else do nothing.
+ * On failure, store an error object through @errp if non-null.
+ */
+void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp)
+{
+ if (key) {
+ if (!bdrv_is_encrypted(bs)) {
+ error_setg(errp, "Device '%s' is not encrypted",
+ bdrv_get_device_name(bs));
+ } else if (bdrv_set_key(bs, key) < 0) {
+ error_set(errp, QERR_INVALID_PASSWORD);
+ }
+ } else {
+ if (bdrv_key_required(bs)) {
+ error_set(errp, ERROR_CLASS_DEVICE_ENCRYPTED,
+ "'%s' (%s) is encrypted",
+ bdrv_get_device_name(bs),
+ bdrv_get_encrypted_filename(bs));
+ }
+ }
+}
+
const char *bdrv_get_format_name(BlockDriverState *bs)
{
return bs->drv ? bs->drv->format_name : NULL;
@@ -4182,12 +4201,18 @@ int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors)
{
BlockDriver *drv = bs->drv;
- if (!drv)
+ int ret;
+
+ if (!drv) {
return -ENOMEDIUM;
- if (!drv->bdrv_write_compressed)
+ }
+ if (!drv->bdrv_write_compressed) {
return -ENOTSUP;
- if (bdrv_check_request(bs, sector_num, nb_sectors))
- return -EIO;
+ }
+ ret = bdrv_check_request(bs, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
assert(QLIST_EMPTY(&bs->dirty_bitmaps));
@@ -4562,6 +4587,8 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
}
}
+ block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);
+
return outidx + 1;
}
@@ -5097,20 +5124,18 @@ static void coroutine_fn bdrv_discard_co_entry(void *opaque)
rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
-/* if no limit is specified in the BlockLimits use a default
- * of 32768 512-byte sectors (16 MiB) per request.
- */
-#define MAX_DISCARD_DEFAULT 32768
-
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
- int max_discard;
+ int max_discard, ret;
if (!bs->drv) {
return -ENOMEDIUM;
- } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
- return -EIO;
+ }
+
+ ret = bdrv_check_request(bs, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
} else if (bs->read_only) {
return -EROFS;
}
@@ -5126,7 +5151,7 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
return 0;
}
- max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
+ max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
while (nb_sectors > 0) {
int ret;
int num = nb_sectors;
@@ -5602,9 +5627,8 @@ void bdrv_img_create(const char *filename, const char *fmt,
return;
}
- proto_drv = bdrv_find_protocol(filename, true);
+ proto_drv = bdrv_find_protocol(filename, true, errp);
if (!proto_drv) {
- error_setg(errp, "Unknown protocol '%s'", filename);
return;
}
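
For illustration, a minimal caller sketch of the new bdrv_add_key() helper added above (this is not part of the patch; the function name for the example is made up, and error_get_pretty()/error_free() are the usual QEMU error helpers):

/* Usage sketch only, assuming the semantics documented in the comment
 * above: a non-NULL key requires the device to be encrypted and replaces
 * any existing key; a NULL key only fails if a key is still required. */
static void set_drive_key_example(BlockDriverState *bs, const char *password)
{
    Error *local_err = NULL;

    bdrv_add_key(bs, password, &local_err);
    if (local_err) {
        error_report("%s", error_get_pretty(local_err));
        error_free(local_err);
    }
}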
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 04b0e43eb1..db2933e469 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -20,6 +20,7 @@ block-obj-$(CONFIG_GLUSTERFS) += gluster.o
block-obj-$(CONFIG_ARCHIPELAGO) += archipelago.o
block-obj-$(CONFIG_LIBSSH2) += ssh.o
block-obj-y += accounting.o
+block-obj-y += write-threshold.o
common-obj-y += stream.o
common-obj-y += commit.o
@@ -36,5 +37,6 @@ gluster.o-libs := $(GLUSTERFS_LIBS)
ssh.o-cflags := $(LIBSSH2_CFLAGS)
ssh.o-libs := $(LIBSSH2_LIBS)
archipelago.o-libs := $(ARCHIPELAGO_LIBS)
+dmg.o-libs := $(BZIP2_LIBS)
qcow.o-libs := -lz
linux-aio.o-libs := -laio
diff --git a/block/accounting.c b/block/accounting.c
index 18102f015d..01d594ffdc 100644
--- a/block/accounting.c
+++ b/block/accounting.c
@@ -54,3 +54,10 @@ void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num,
stats->wr_highest_sector = sector_num + nb_sectors - 1;
}
}
+
+void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type,
+ int num_requests)
+{
+ assert(type < BLOCK_MAX_IOTYPE);
+ stats->merged[type] += num_requests;
+}
diff --git a/block/block-backend.c b/block/block-backend.c
index d00c129f15..aabe569642 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -31,6 +31,16 @@ struct BlockBackend {
void *dev_opaque;
};
+typedef struct BlockBackendAIOCB {
+ BlockAIOCB common;
+ QEMUBH *bh;
+ int ret;
+} BlockBackendAIOCB;
+
+static const AIOCBInfo block_backend_aiocb_info = {
+ .aiocb_size = sizeof(BlockBackendAIOCB),
+};
+
static void drive_info_del(DriveInfo *dinfo);
/* All the BlockBackends (except for hidden ones) */
@@ -91,6 +101,40 @@ BlockBackend *blk_new_with_bs(const char *name, Error **errp)
return blk;
}
+/*
+ * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
+ *
+ * Just as with bdrv_open(), after having called this function the reference to
+ * @options belongs to the block layer (even on failure).
+ *
+ * TODO: Remove @filename and @flags; it should be possible to specify a whole
+ * BDS tree just by specifying the @options QDict (or @reference,
+ * alternatively). At the time of adding this function, this is not possible,
+ * though, so callers of this function have to be able to specify @filename and
+ * @flags.
+ */
+BlockBackend *blk_new_open(const char *name, const char *filename,
+ const char *reference, QDict *options, int flags,
+ Error **errp)
+{
+ BlockBackend *blk;
+ int ret;
+
+ blk = blk_new_with_bs(name, errp);
+ if (!blk) {
+ QDECREF(options);
+ return NULL;
+ }
+
+ ret = bdrv_open(&blk->bs, filename, reference, options, flags, NULL, errp);
+ if (ret < 0) {
+ blk_unref(blk);
+ return NULL;
+ }
+
+ return blk;
+}
+
static void blk_delete(BlockBackend *blk)
{
assert(!blk->refcnt);
@@ -394,39 +438,137 @@ void blk_iostatus_enable(BlockBackend *blk)
bdrv_iostatus_enable(blk->bs);
}
+static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
+ size_t size)
+{
+ int64_t len;
+
+ if (size > INT_MAX) {
+ return -EIO;
+ }
+
+ if (!blk_is_inserted(blk)) {
+ return -ENOMEDIUM;
+ }
+
+ len = blk_getlength(blk);
+ if (len < 0) {
+ return len;
+ }
+
+ if (offset < 0) {
+ return -EIO;
+ }
+
+ if (offset > len || len - offset < size) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int blk_check_request(BlockBackend *blk, int64_t sector_num,
+ int nb_sectors)
+{
+ if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
+ return -EIO;
+ }
+
+ if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+ return -EIO;
+ }
+
+ return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
+ nb_sectors * BDRV_SECTOR_SIZE);
+}
+
int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
int nb_sectors)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}
int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
int nb_sectors)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}
int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
int nb_sectors)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}
+static void error_callback_bh(void *opaque)
+{
+ struct BlockBackendAIOCB *acb = opaque;
+ qemu_bh_delete(acb->bh);
+ acb->common.cb(acb->common.opaque, acb->ret);
+ qemu_aio_unref(acb);
+}
+
+static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
+ void *opaque, int ret)
+{
+ struct BlockBackendAIOCB *acb;
+ QEMUBH *bh;
+
+ acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
+ acb->ret = ret;
+
+ bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
+ acb->bh = bh;
+ qemu_bh_schedule(bh);
+
+ return &acb->common;
+}
+
BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
int nb_sectors, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return abort_aio_request(blk, cb, opaque, ret);
+ }
+
return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
+ int ret = blk_check_byte_request(blk, offset, count);
+ if (ret < 0) {
+ return ret;
+ }
+
return bdrv_pread(blk->bs, offset, buf, count);
}
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
+ int ret = blk_check_byte_request(blk, offset, count);
+ if (ret < 0) {
+ return ret;
+ }
+
return bdrv_pwrite(blk->bs, offset, buf, count);
}
@@ -440,10 +582,20 @@ void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
bdrv_get_geometry(blk->bs, nb_sectors_ptr);
}
+int64_t blk_nb_sectors(BlockBackend *blk)
+{
+ return bdrv_nb_sectors(blk->bs);
+}
+
BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return abort_aio_request(blk, cb, opaque, ret);
+ }
+
return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}
@@ -451,6 +603,11 @@ BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return abort_aio_request(blk, cb, opaque, ret);
+ }
+
return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}
@@ -464,6 +621,11 @@ BlockAIOCB *blk_aio_discard(BlockBackend *blk,
int64_t sector_num, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return abort_aio_request(blk, cb, opaque, ret);
+ }
+
return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}
@@ -479,6 +641,15 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
+ int i, ret;
+
+ for (i = 0; i < num_reqs; i++) {
+ ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}
@@ -495,6 +666,11 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}
@@ -580,6 +756,11 @@ int blk_get_flags(BlockBackend *blk)
return bdrv_get_flags(blk->bs);
}
+int blk_get_max_transfer_length(BlockBackend *blk)
+{
+ return blk->bs->bl.max_transfer_length;
+}
+
void blk_set_guest_block_size(BlockBackend *blk, int align)
{
bdrv_set_guest_block_size(blk->bs, align);
@@ -663,3 +844,51 @@ void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
{
return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
+
+int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
+ int nb_sectors, BdrvRequestFlags flags)
+{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
+}
+
+int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
+ const uint8_t *buf, int nb_sectors)
+{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
+}
+
+int blk_truncate(BlockBackend *blk, int64_t offset)
+{
+ return bdrv_truncate(blk->bs, offset);
+}
+
+int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
+{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return bdrv_discard(blk->bs, sector_num, nb_sectors);
+}
+
+int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
+ int64_t pos, int size)
+{
+ return bdrv_save_vmstate(blk->bs, buf, pos, size);
+}
+
+int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
+{
+ return bdrv_load_vmstate(blk->bs, buf, pos, size);
+}
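
For illustration, a minimal sketch of how the new blk_new_open() wrapper is intended to be called (not part of the patch; the backend name, filename and flags below are hypothetical):

/* Usage sketch only.  As the comment above states, blk_new_open() takes
 * ownership of the options QDict even on failure, so the caller does not
 * QDECREF it. */
Error *local_err = NULL;
BlockBackend *blk = blk_new_open("drive0", "disk.qcow2", NULL, NULL,
                                 BDRV_O_RDWR, &local_err);
if (!blk) {
    error_report("%s", error_get_pretty(local_err));
    error_free(local_err);
}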
diff --git a/block/dmg.c b/block/dmg.c
index e455886d2b..825c49d59a 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -26,6 +26,10 @@
#include "qemu/bswap.h"
#include "qemu/module.h"
#include <zlib.h>
+#ifdef CONFIG_BZIP2
+#include <bzlib.h>
+#endif
+#include <glib.h>
enum {
/* Limit chunk sizes to prevent unreasonable amounts of memory being used
@@ -55,6 +59,9 @@ typedef struct BDRVDMGState {
uint8_t *compressed_chunk;
uint8_t *uncompressed_chunk;
z_stream zstream;
+#ifdef CONFIG_BZIP2
+ bz_stream bzstream;
+#endif
} BDRVDMGState;
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
@@ -100,6 +107,16 @@ static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
return 0;
}
+static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
+{
+ return be64_to_cpu(*(uint64_t *)&buffer[offset]);
+}
+
+static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
+{
+ return be32_to_cpu(*(uint32_t *)&buffer[offset]);
+}
+
/* Increase max chunk sizes, if necessary. This function is used to calculate
* the buffer sizes needed for compressed/uncompressed chunk I/O.
*/
@@ -112,6 +129,7 @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
switch (s->types[chunk]) {
case 0x80000005: /* zlib compressed */
+ case 0x80000006: /* bzip2 compressed */
compressed_size = s->lengths[chunk];
uncompressed_sectors = s->sectorcounts[chunk];
break;
@@ -119,7 +137,9 @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
break;
case 2: /* zero */
- uncompressed_sectors = s->sectorcounts[chunk];
+ /* as the all-zeroes block may be large, it is treated specially: the
+ * sector is not copied from a large buffer, a simple memset is used
+ * instead. Therefore uncompressed_sectors does not need to be set. */
break;
}
@@ -131,163 +151,372 @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
}
}
+static int64_t dmg_find_koly_offset(BlockDriverState *file_bs, Error **errp)
+{
+ int64_t length;
+ int64_t offset = 0;
+ uint8_t buffer[515];
+ int i, ret;
+
+ /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
+ * dmg images can have odd sizes, try to look for the "koly" magic which
+ * marks the begin of the UDIF trailer (512 bytes). This magic can be found
+ * in the last 511 bytes of the second-last sector or the first 4 bytes of
+ * the last sector (search space: 515 bytes) */
+ length = bdrv_getlength(file_bs);
+ if (length < 0) {
+ error_setg_errno(errp, -length,
+ "Failed to get file size while reading UDIF trailer");
+ return length;
+ } else if (length < 512) {
+ error_setg(errp, "dmg file must be at least 512 bytes long");
+ return -EINVAL;
+ }
+ if (length > 511 + 512) {
+ offset = length - 511 - 512;
+ }
+ length = length < 515 ? length : 515;
+ ret = bdrv_pread(file_bs, offset, buffer, length);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
+ return ret;
+ }
+ for (i = 0; i < length - 3; i++) {
+ if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
+ buffer[i+2] == 'l' && buffer[i+3] == 'y') {
+ return offset + i;
+ }
+ }
+ error_setg(errp, "Could not locate UDIF trailer in dmg file");
+ return -EINVAL;
+}
+
+/* used when building the sector table */
+typedef struct DmgHeaderState {
+ /* used internally by dmg_read_mish_block to remember offsets of blocks
+ * across calls */
+ uint64_t data_fork_offset;
+ /* exported for dmg_open */
+ uint32_t max_compressed_size;
+ uint32_t max_sectors_per_chunk;
+} DmgHeaderState;
+
+static bool dmg_is_known_block_type(uint32_t entry_type)
+{
+ switch (entry_type) {
+ case 0x00000001: /* uncompressed */
+ case 0x00000002: /* zeroes */
+ case 0x80000005: /* zlib */
+#ifdef CONFIG_BZIP2
+ case 0x80000006: /* bzip2 */
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
+ uint8_t *buffer, uint32_t count)
+{
+ uint32_t type, i;
+ int ret;
+ size_t new_size;
+ uint32_t chunk_count;
+ int64_t offset = 0;
+ uint64_t data_offset;
+ uint64_t in_offset = ds->data_fork_offset;
+ uint64_t out_offset;
+
+ type = buff_read_uint32(buffer, offset);
+ /* skip data that is not a valid MISH block (invalid magic or too small) */
+ if (type != 0x6d697368 || count < 244) {
+ /* assume success for now */
+ return 0;
+ }
+
+ /* chunk offsets are relative to this sector number */
+ out_offset = buff_read_uint64(buffer, offset + 8);
+
+ /* location in data fork for (compressed) blob (in bytes) */
+ data_offset = buff_read_uint64(buffer, offset + 0x18);
+ in_offset += data_offset;
+
+ /* move to begin of chunk entries */
+ offset += 204;
+
+ chunk_count = (count - 204) / 40;
+ new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
+ s->types = g_realloc(s->types, new_size / 2);
+ s->offsets = g_realloc(s->offsets, new_size);
+ s->lengths = g_realloc(s->lengths, new_size);
+ s->sectors = g_realloc(s->sectors, new_size);
+ s->sectorcounts = g_realloc(s->sectorcounts, new_size);
+
+ for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
+ s->types[i] = buff_read_uint32(buffer, offset);
+ if (!dmg_is_known_block_type(s->types[i])) {
+ chunk_count--;
+ i--;
+ offset += 40;
+ continue;
+ }
+
+ /* sector number */
+ s->sectors[i] = buff_read_uint64(buffer, offset + 8);
+ s->sectors[i] += out_offset;
+
+ /* sector count */
+ s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);
+
+ /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
+ * therefore be unbounded. */
+ if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
+ error_report("sector count %" PRIu64 " for chunk %" PRIu32
+ " is larger than max (%u)",
+ s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* offset in (compressed) data fork */
+ s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
+ s->offsets[i] += in_offset;
+
+ /* length in (compressed) data fork */
+ s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);
+
+ if (s->lengths[i] > DMG_LENGTHS_MAX) {
+ error_report("length %" PRIu64 " for chunk %" PRIu32
+ " is larger than max (%u)",
+ s->lengths[i], i, DMG_LENGTHS_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ update_max_chunk_size(s, i, &ds->max_compressed_size,
+ &ds->max_sectors_per_chunk);
+ offset += 40;
+ }
+ s->n_chunks += chunk_count;
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
+ uint64_t info_begin, uint64_t info_length)
+{
+ BDRVDMGState *s = bs->opaque;
+ int ret;
+ uint32_t count, rsrc_data_offset;
+ uint8_t *buffer = NULL;
+ uint64_t info_end;
+ uint64_t offset;
+
+ /* read offset from begin of resource fork (info_begin) to resource data */
+ ret = read_uint32(bs, info_begin, &rsrc_data_offset);
+ if (ret < 0) {
+ goto fail;
+ } else if (rsrc_data_offset > info_length) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* read length of resource data */
+ ret = read_uint32(bs, info_begin + 8, &count);
+ if (ret < 0) {
+ goto fail;
+ } else if (count == 0 || rsrc_data_offset + count > info_length) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* begin of resource data (consisting of one or more resources) */
+ offset = info_begin + rsrc_data_offset;
+
+ /* end of resource data (there is possibly a following resource map
+ * which will be ignored). */
+ info_end = offset + count;
+
+ /* read offsets (mish blocks) from one or more resources in resource data */
+ while (offset < info_end) {
+ /* size of following resource */
+ ret = read_uint32(bs, offset, &count);
+ if (ret < 0) {
+ goto fail;
+ } else if (count == 0 || count > info_end - offset) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ offset += 4;
+
+ buffer = g_realloc(buffer, count);
+ ret = bdrv_pread(bs->file, offset, buffer, count);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = dmg_read_mish_block(s, ds, buffer, count);
+ if (ret < 0) {
+ goto fail;
+ }
+ /* advance offset by size of resource */
+ offset += count;
+ }
+ ret = 0;
+
+fail:
+ g_free(buffer);
+ return ret;
+}
+
+static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
+ uint64_t info_begin, uint64_t info_length)
+{
+ BDRVDMGState *s = bs->opaque;
+ int ret;
+ uint8_t *buffer = NULL;
+ char *data_begin, *data_end;
+
+ /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
+ * safe upper cap on the data length. A test sample had a XML length of
+ * about 1 MiB. */
+ if (info_length == 0 || info_length > 16 * 1024 * 1024) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ buffer = g_malloc(info_length + 1);
+ buffer[info_length] = '\0';
+ ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
+ if (ret != info_length) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
+ * decode. The actual data element has 431 (0x1af) bytes which includes tabs
+ * and line feeds. */
+ data_end = (char *)buffer;
+ while ((data_begin = strstr(data_end, "<data>")) != NULL) {
+ guchar *mish;
+ gsize out_len = 0;
+
+ data_begin += 6;
+ data_end = strstr(data_begin, "</data>");
+ /* malformed XML? */
+ if (data_end == NULL) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ *data_end++ = '\0';
+ mish = g_base64_decode(data_begin, &out_len);
+ ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
+ g_free(mish);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+ ret = 0;
+
+fail:
+ g_free(buffer);
+ return ret;
+}
+
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVDMGState *s = bs->opaque;
- uint64_t info_begin, info_end, last_in_offset, last_out_offset;
- uint32_t count, tmp;
- uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
+ DmgHeaderState ds;
+ uint64_t rsrc_fork_offset, rsrc_fork_length;
+ uint64_t plist_xml_offset, plist_xml_length;
int64_t offset;
int ret;
bs->read_only = 1;
s->n_chunks = 0;
s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
+ /* used by dmg_read_mish_block to keep track of the current I/O position */
+ ds.data_fork_offset = 0;
+ ds.max_compressed_size = 1;
+ ds.max_sectors_per_chunk = 1;
- /* read offset of info blocks */
- offset = bdrv_getlength(bs->file);
+ /* locate the UDIF trailer */
+ offset = dmg_find_koly_offset(bs->file, errp);
if (offset < 0) {
ret = offset;
goto fail;
}
- offset -= 0x1d8;
- ret = read_uint64(bs, offset, &info_begin);
+ /* offset of data fork (DataForkOffset) */
+ ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
if (ret < 0) {
goto fail;
- } else if (info_begin == 0) {
+ } else if (ds.data_fork_offset > offset) {
ret = -EINVAL;
goto fail;
}
- ret = read_uint32(bs, info_begin, &tmp);
+ /* offset of resource fork (RsrcForkOffset) */
+ ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
if (ret < 0) {
goto fail;
- } else if (tmp != 0x100) {
+ }
+ ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
+ if (ret < 0) {
+ goto fail;
+ }
+ if (rsrc_fork_offset >= offset ||
+ rsrc_fork_length > offset - rsrc_fork_offset) {
ret = -EINVAL;
goto fail;
}
-
- ret = read_uint32(bs, info_begin + 4, &count);
+ /* offset of property list (XMLOffset) */
+ ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
if (ret < 0) {
goto fail;
- } else if (count == 0) {
+ }
+ ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
+ if (ret < 0) {
+ goto fail;
+ }
+ if (plist_xml_offset >= offset ||
+ plist_xml_length > offset - plist_xml_offset) {
ret = -EINVAL;
goto fail;
}
- info_end = info_begin + count;
-
- offset = info_begin + 0x100;
-
- /* read offsets */
- last_in_offset = last_out_offset = 0;
- while (offset < info_end) {
- uint32_t type;
-
- ret = read_uint32(bs, offset, &count);
+ ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
+ if (ret < 0) {
+ goto fail;
+ }
+ if (bs->total_sectors < 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (rsrc_fork_length != 0) {
+ ret = dmg_read_resource_fork(bs, &ds,
+ rsrc_fork_offset, rsrc_fork_length);
if (ret < 0) {
goto fail;
- } else if (count == 0) {
- ret = -EINVAL;
- goto fail;
}
- offset += 4;
-
- ret = read_uint32(bs, offset, &type);
+ } else if (plist_xml_length != 0) {
+ ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
if (ret < 0) {
goto fail;
}
-
- if (type == 0x6d697368 && count >= 244) {
- size_t new_size;
- uint32_t chunk_count;
-
- offset += 4;
- offset += 200;
-
- chunk_count = (count - 204) / 40;
- new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
- s->types = g_realloc(s->types, new_size / 2);
- s->offsets = g_realloc(s->offsets, new_size);
- s->lengths = g_realloc(s->lengths, new_size);
- s->sectors = g_realloc(s->sectors, new_size);
- s->sectorcounts = g_realloc(s->sectorcounts, new_size);
-
- for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
- ret = read_uint32(bs, offset, &s->types[i]);
- if (ret < 0) {
- goto fail;
- }
- offset += 4;
- if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
- s->types[i] != 2) {
- if (s->types[i] == 0xffffffff && i > 0) {
- last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
- last_out_offset = s->sectors[i - 1] +
- s->sectorcounts[i - 1];
- }
- chunk_count--;
- i--;
- offset += 36;
- continue;
- }
- offset += 4;
-
- ret = read_uint64(bs, offset, &s->sectors[i]);
- if (ret < 0) {
- goto fail;
- }
- s->sectors[i] += last_out_offset;
- offset += 8;
-
- ret = read_uint64(bs, offset, &s->sectorcounts[i]);
- if (ret < 0) {
- goto fail;
- }
- offset += 8;
-
- if (s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
- error_report("sector count %" PRIu64 " for chunk %" PRIu32
- " is larger than max (%u)",
- s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
- ret = -EINVAL;
- goto fail;
- }
-
- ret = read_uint64(bs, offset, &s->offsets[i]);
- if (ret < 0) {
- goto fail;
- }
- s->offsets[i] += last_in_offset;
- offset += 8;
-
- ret = read_uint64(bs, offset, &s->lengths[i]);
- if (ret < 0) {
- goto fail;
- }
- offset += 8;
-
- if (s->lengths[i] > DMG_LENGTHS_MAX) {
- error_report("length %" PRIu64 " for chunk %" PRIu32
- " is larger than max (%u)",
- s->lengths[i], i, DMG_LENGTHS_MAX);
- ret = -EINVAL;
- goto fail;
- }
-
- update_max_chunk_size(s, i, &max_compressed_size,
- &max_sectors_per_chunk);
- }
- s->n_chunks += chunk_count;
- }
+ } else {
+ ret = -EINVAL;
+ goto fail;
}
/* initialize zlib engine */
s->compressed_chunk = qemu_try_blockalign(bs->file,
- max_compressed_size + 1);
+ ds.max_compressed_size + 1);
s->uncompressed_chunk = qemu_try_blockalign(bs->file,
- 512 * max_sectors_per_chunk);
+ 512 * ds.max_sectors_per_chunk);
if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
ret = -ENOMEM;
goto fail;
@@ -349,13 +578,16 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
int ret;
uint32_t chunk = search_chunk(s, sector_num);
+#ifdef CONFIG_BZIP2
+ uint64_t total_out;
+#endif
if (chunk >= s->n_chunks) {
return -1;
}
s->current_chunk = s->n_chunks;
- switch (s->types[chunk]) {
+ switch (s->types[chunk]) { /* block entry type */
case 0x80000005: { /* zlib compressed */
/* we need to buffer, because only the chunk as whole can be
* inflated. */
@@ -379,6 +611,34 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
return -1;
}
break; }
+#ifdef CONFIG_BZIP2
+ case 0x80000006: /* bzip2 compressed */
+ /* we need to buffer, because only the chunk as whole can be
+ * inflated. */
+ ret = bdrv_pread(bs->file, s->offsets[chunk],
+ s->compressed_chunk, s->lengths[chunk]);
+ if (ret != s->lengths[chunk]) {
+ return -1;
+ }
+
+ ret = BZ2_bzDecompressInit(&s->bzstream, 0, 0);
+ if (ret != BZ_OK) {
+ return -1;
+ }
+ s->bzstream.next_in = (char *)s->compressed_chunk;
+ s->bzstream.avail_in = (unsigned int) s->lengths[chunk];
+ s->bzstream.next_out = (char *)s->uncompressed_chunk;
+ s->bzstream.avail_out = (unsigned int) 512 * s->sectorcounts[chunk];
+ ret = BZ2_bzDecompress(&s->bzstream);
+ total_out = ((uint64_t)s->bzstream.total_out_hi32 << 32) +
+ s->bzstream.total_out_lo32;
+ BZ2_bzDecompressEnd(&s->bzstream);
+ if (ret != BZ_STREAM_END ||
+ total_out != 512 * s->sectorcounts[chunk]) {
+ return -1;
+ }
+ break;
+#endif /* CONFIG_BZIP2 */
case 1: /* copy */
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->uncompressed_chunk, s->lengths[chunk]);
@@ -387,7 +647,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
}
break;
case 2: /* zero */
- memset(s->uncompressed_chunk, 0, 512 * s->sectorcounts[chunk]);
+ /* see dmg_read, it is treated specially. No buffer needs to be
+ * pre-filled, the zeroes can be set directly. */
break;
}
s->current_chunk = chunk;
@@ -406,6 +667,13 @@ static int dmg_read(BlockDriverState *bs, int64_t sector_num,
if (dmg_read_chunk(bs, sector_num + i) != 0) {
return -1;
}
+ /* Special case: current chunk is all zeroes. Do not perform a memcpy as
+ * s->uncompressed_chunk may be too small to cover the large all-zeroes
+ * section. dmg_read_chunk is called to find s->current_chunk */
+ if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
+ memset(buf + i * 512, 0, 512);
+ continue;
+ }
sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
memcpy(buf + i * 512,
s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 6e1c97cad0..259f5a3cb6 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -43,20 +43,23 @@ static void nbd_recv_coroutines_enter_all(NbdClientSession *s)
}
}
-static void nbd_teardown_connection(NbdClientSession *client)
+static void nbd_teardown_connection(BlockDriverState *bs)
{
+ NbdClientSession *client = nbd_get_client_session(bs);
+
/* finish any pending coroutines */
shutdown(client->sock, 2);
nbd_recv_coroutines_enter_all(client);
- nbd_client_session_detach_aio_context(client);
+ nbd_client_detach_aio_context(bs);
closesocket(client->sock);
client->sock = -1;
}
static void nbd_reply_ready(void *opaque)
{
- NbdClientSession *s = opaque;
+ BlockDriverState *bs = opaque;
+ NbdClientSession *s = nbd_get_client_session(bs);
uint64_t i;
int ret;
@@ -89,28 +92,40 @@ static void nbd_reply_ready(void *opaque)
}
fail:
- nbd_teardown_connection(s);
+ nbd_teardown_connection(bs);
}
static void nbd_restart_write(void *opaque)
{
- NbdClientSession *s = opaque;
+ BlockDriverState *bs = opaque;
- qemu_coroutine_enter(s->send_coroutine, NULL);
+ qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine, NULL);
}
-static int nbd_co_send_request(NbdClientSession *s,
- struct nbd_request *request,
- QEMUIOVector *qiov, int offset)
+static int nbd_co_send_request(BlockDriverState *bs,
+ struct nbd_request *request,
+ QEMUIOVector *qiov, int offset)
{
+ NbdClientSession *s = nbd_get_client_session(bs);
AioContext *aio_context;
- int rc, ret;
+ int rc, ret, i;
qemu_co_mutex_lock(&s->send_mutex);
+
+ for (i = 0; i < MAX_NBD_REQUESTS; i++) {
+ if (s->recv_coroutine[i] == NULL) {
+ s->recv_coroutine[i] = qemu_coroutine_self();
+ break;
+ }
+ }
+
+ assert(i < MAX_NBD_REQUESTS);
+ request->handle = INDEX_TO_HANDLE(s, i);
s->send_coroutine = qemu_coroutine_self();
- aio_context = bdrv_get_aio_context(s->bs);
+ aio_context = bdrv_get_aio_context(bs);
+
aio_set_fd_handler(aio_context, s->sock,
- nbd_reply_ready, nbd_restart_write, s);
+ nbd_reply_ready, nbd_restart_write, bs);
if (qiov) {
if (!s->is_unix) {
socket_set_cork(s->sock, 1);
@@ -129,7 +144,7 @@ static int nbd_co_send_request(NbdClientSession *s,
} else {
rc = nbd_send_request(s->sock, request);
}
- aio_set_fd_handler(aio_context, s->sock, nbd_reply_ready, NULL, s);
+ aio_set_fd_handler(aio_context, s->sock, nbd_reply_ready, NULL, bs);
s->send_coroutine = NULL;
qemu_co_mutex_unlock(&s->send_mutex);
return rc;
@@ -164,8 +179,6 @@ static void nbd_co_receive_reply(NbdClientSession *s,
static void nbd_coroutine_start(NbdClientSession *s,
struct nbd_request *request)
{
- int i;
-
/* Poor man semaphore. The free_sema is locked when no other request
* can be accepted, and unlocked after receiving one reply. */
if (s->in_flight >= MAX_NBD_REQUESTS - 1) {
@@ -174,15 +187,7 @@ static void nbd_coroutine_start(NbdClientSession *s,
}
s->in_flight++;
- for (i = 0; i < MAX_NBD_REQUESTS; i++) {
- if (s->recv_coroutine[i] == NULL) {
- s->recv_coroutine[i] = qemu_coroutine_self();
- break;
- }
- }
-
- assert(i < MAX_NBD_REQUESTS);
- request->handle = INDEX_TO_HANDLE(s, i);
+ /* s->recv_coroutine[i] is set as soon as we get the send_lock. */
}
static void nbd_coroutine_end(NbdClientSession *s,
@@ -195,10 +200,11 @@ static void nbd_coroutine_end(NbdClientSession *s,
}
}
-static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num,
+static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov,
int offset)
{
+ NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = { .type = NBD_CMD_READ };
struct nbd_reply reply;
ssize_t ret;
@@ -207,7 +213,7 @@ static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num,
request.len = nb_sectors * 512;
nbd_coroutine_start(client, &request);
- ret = nbd_co_send_request(client, &request, NULL, 0);
+ ret = nbd_co_send_request(bs, &request, NULL, 0);
if (ret < 0) {
reply.error = -ret;
} else {
@@ -218,15 +224,16 @@ static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num,
}
-static int nbd_co_writev_1(NbdClientSession *client, int64_t sector_num,
+static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov,
int offset)
{
+ NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = { .type = NBD_CMD_WRITE };
struct nbd_reply reply;
ssize_t ret;
- if (!bdrv_enable_write_cache(client->bs) &&
+ if (!bdrv_enable_write_cache(bs) &&
(client->nbdflags & NBD_FLAG_SEND_FUA)) {
request.type |= NBD_CMD_FLAG_FUA;
}
@@ -235,7 +242,7 @@ static int nbd_co_writev_1(NbdClientSession *client, int64_t sector_num,
request.len = nb_sectors * 512;
nbd_coroutine_start(client, &request);
- ret = nbd_co_send_request(client, &request, qiov, offset);
+ ret = nbd_co_send_request(bs, &request, qiov, offset);
if (ret < 0) {
reply.error = -ret;
} else {
@@ -249,14 +256,13 @@ static int nbd_co_writev_1(NbdClientSession *client, int64_t sector_num,
* remain aligned to 4K. */
#define NBD_MAX_SECTORS 2040
-int nbd_client_session_co_readv(NbdClientSession *client, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov)
+int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov)
{
int offset = 0;
int ret;
while (nb_sectors > NBD_MAX_SECTORS) {
- ret = nbd_co_readv_1(client, sector_num,
- NBD_MAX_SECTORS, qiov, offset);
+ ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
if (ret < 0) {
return ret;
}
@@ -264,17 +270,16 @@ int nbd_client_session_co_readv(NbdClientSession *client, int64_t sector_num,
sector_num += NBD_MAX_SECTORS;
nb_sectors -= NBD_MAX_SECTORS;
}
- return nbd_co_readv_1(client, sector_num, nb_sectors, qiov, offset);
+ return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
}
-int nbd_client_session_co_writev(NbdClientSession *client, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov)
+int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov)
{
int offset = 0;
int ret;
while (nb_sectors > NBD_MAX_SECTORS) {
- ret = nbd_co_writev_1(client, sector_num,
- NBD_MAX_SECTORS, qiov, offset);
+ ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
if (ret < 0) {
return ret;
}
@@ -282,11 +287,12 @@ int nbd_client_session_co_writev(NbdClientSession *client, int64_t sector_num,
sector_num += NBD_MAX_SECTORS;
nb_sectors -= NBD_MAX_SECTORS;
}
- return nbd_co_writev_1(client, sector_num, nb_sectors, qiov, offset);
+ return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
}
-int nbd_client_session_co_flush(NbdClientSession *client)
+int nbd_client_co_flush(BlockDriverState *bs)
{
+ NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = { .type = NBD_CMD_FLUSH };
struct nbd_reply reply;
ssize_t ret;
@@ -303,7 +309,7 @@ int nbd_client_session_co_flush(NbdClientSession *client)
request.len = 0;
nbd_coroutine_start(client, &request);
- ret = nbd_co_send_request(client, &request, NULL, 0);
+ ret = nbd_co_send_request(bs, &request, NULL, 0);
if (ret < 0) {
reply.error = -ret;
} else {
@@ -313,9 +319,10 @@ int nbd_client_session_co_flush(NbdClientSession *client)
return -reply.error;
}
-int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
- int nb_sectors)
+int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors)
{
+ NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = { .type = NBD_CMD_TRIM };
struct nbd_reply reply;
ssize_t ret;
@@ -327,7 +334,7 @@ int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
request.len = nb_sectors * 512;
nbd_coroutine_start(client, &request);
- ret = nbd_co_send_request(client, &request, NULL, 0);
+ ret = nbd_co_send_request(bs, &request, NULL, 0);
if (ret < 0) {
reply.error = -ret;
} else {
@@ -338,43 +345,41 @@ int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
}
-void nbd_client_session_detach_aio_context(NbdClientSession *client)
+void nbd_client_detach_aio_context(BlockDriverState *bs)
{
- aio_set_fd_handler(bdrv_get_aio_context(client->bs), client->sock,
- NULL, NULL, NULL);
+ aio_set_fd_handler(bdrv_get_aio_context(bs),
+ nbd_get_client_session(bs)->sock, NULL, NULL, NULL);
}
-void nbd_client_session_attach_aio_context(NbdClientSession *client,
- AioContext *new_context)
+void nbd_client_attach_aio_context(BlockDriverState *bs,
+ AioContext *new_context)
{
- aio_set_fd_handler(new_context, client->sock,
- nbd_reply_ready, NULL, client);
+ aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sock,
+ nbd_reply_ready, NULL, bs);
}
-void nbd_client_session_close(NbdClientSession *client)
+void nbd_client_close(BlockDriverState *bs)
{
+ NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = {
.type = NBD_CMD_DISC,
.from = 0,
.len = 0
};
- if (!client->bs) {
- return;
- }
if (client->sock == -1) {
return;
}
nbd_send_request(client->sock, &request);
- nbd_teardown_connection(client);
- client->bs = NULL;
+ nbd_teardown_connection(bs);
}
-int nbd_client_session_init(NbdClientSession *client, BlockDriverState *bs,
- int sock, const char *export)
+int nbd_client_init(BlockDriverState *bs, int sock, const char *export,
+ Error **errp)
{
+ NbdClientSession *client = nbd_get_client_session(bs);
int ret;
/* NBD handshake */
@@ -382,7 +387,7 @@ int nbd_client_session_init(NbdClientSession *client, BlockDriverState *bs,
qemu_set_block(sock);
ret = nbd_receive_negotiate(sock, export,
&client->nbdflags, &client->size,
- &client->blocksize);
+ &client->blocksize, errp);
if (ret < 0) {
logout("Failed to negotiate with the NBD server\n");
closesocket(sock);
@@ -391,13 +396,12 @@ int nbd_client_session_init(NbdClientSession *client, BlockDriverState *bs,
qemu_co_mutex_init(&client->send_mutex);
qemu_co_mutex_init(&client->free_sema);
- client->bs = bs;
client->sock = sock;
/* Now that we're connected, set the socket to be non-blocking and
* kick the reply mechanism. */
qemu_set_nonblock(sock);
- nbd_client_session_attach_aio_context(client, bdrv_get_aio_context(bs));
+ nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));
logout("Established connection with NBD server\n");
return 0;
diff --git a/block/nbd-client.h b/block/nbd-client.h
index cd478f3a98..fa4ff42d22 100644
--- a/block/nbd-client.h
+++ b/block/nbd-client.h
@@ -31,24 +31,24 @@ typedef struct NbdClientSession {
struct nbd_reply reply;
bool is_unix;
-
- BlockDriverState *bs;
} NbdClientSession;
-int nbd_client_session_init(NbdClientSession *client, BlockDriverState *bs,
- int sock, const char *export_name);
-void nbd_client_session_close(NbdClientSession *client);
-
-int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
- int nb_sectors);
-int nbd_client_session_co_flush(NbdClientSession *client);
-int nbd_client_session_co_writev(NbdClientSession *client, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov);
-int nbd_client_session_co_readv(NbdClientSession *client, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov);
-
-void nbd_client_session_detach_aio_context(NbdClientSession *client);
-void nbd_client_session_attach_aio_context(NbdClientSession *client,
- AioContext *new_context);
+NbdClientSession *nbd_get_client_session(BlockDriverState *bs);
+
+int nbd_client_init(BlockDriverState *bs, int sock, const char *export_name,
+ Error **errp);
+void nbd_client_close(BlockDriverState *bs);
+
+int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors);
+int nbd_client_co_flush(BlockDriverState *bs);
+int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov);
+int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov);
+
+void nbd_client_detach_aio_context(BlockDriverState *bs);
+void nbd_client_attach_aio_context(BlockDriverState *bs,
+ AioContext *new_context);
#endif /* NBD_CLIENT_H */
diff --git a/block/nbd.c b/block/nbd.c
index 04cc845076..2f3b9adf72 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -224,6 +224,12 @@ static void nbd_config(BDRVNBDState *s, QDict *options, char **export,
}
}
+NbdClientSession *nbd_get_client_session(BlockDriverState *bs)
+{
+ BDRVNBDState *s = bs->opaque;
+ return &s->client;
+}
+
static int nbd_establish_connection(BlockDriverState *bs, Error **errp)
{
BDRVNBDState *s = bs->opaque;
@@ -271,7 +277,7 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
}
/* NBD handshake */
- result = nbd_client_session_init(&s->client, bs, sock, export);
+ result = nbd_client_init(bs, sock, export, errp);
g_free(export);
return result;
}
@@ -279,35 +285,30 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
- BDRVNBDState *s = bs->opaque;
-
- return nbd_client_session_co_readv(&s->client, sector_num,
- nb_sectors, qiov);
+ return nbd_client_co_readv(bs, sector_num, nb_sectors, qiov);
}
static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
- BDRVNBDState *s = bs->opaque;
-
- return nbd_client_session_co_writev(&s->client, sector_num,
- nb_sectors, qiov);
+ return nbd_client_co_writev(bs, sector_num, nb_sectors, qiov);
}
static int nbd_co_flush(BlockDriverState *bs)
{
- BDRVNBDState *s = bs->opaque;
+ return nbd_client_co_flush(bs);
+}
- return nbd_client_session_co_flush(&s->client);
+static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+ bs->bl.max_discard = UINT32_MAX >> BDRV_SECTOR_BITS;
+ bs->bl.max_transfer_length = UINT32_MAX >> BDRV_SECTOR_BITS;
}
static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
- BDRVNBDState *s = bs->opaque;
-
- return nbd_client_session_co_discard(&s->client, sector_num,
- nb_sectors);
+ return nbd_client_co_discard(bs, sector_num, nb_sectors);
}
static void nbd_close(BlockDriverState *bs)
@@ -315,7 +316,7 @@ static void nbd_close(BlockDriverState *bs)
BDRVNBDState *s = bs->opaque;
qemu_opts_del(s->socket_opts);
- nbd_client_session_close(&s->client);
+ nbd_client_close(bs);
}
static int64_t nbd_getlength(BlockDriverState *bs)
@@ -327,17 +328,13 @@ static int64_t nbd_getlength(BlockDriverState *bs)
static void nbd_detach_aio_context(BlockDriverState *bs)
{
- BDRVNBDState *s = bs->opaque;
-
- nbd_client_session_detach_aio_context(&s->client);
+ nbd_client_detach_aio_context(bs);
}
static void nbd_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
- BDRVNBDState *s = bs->opaque;
-
- nbd_client_session_attach_aio_context(&s->client, new_context);
+ nbd_client_attach_aio_context(bs, new_context);
}
static void nbd_refresh_filename(BlockDriverState *bs)
@@ -396,6 +393,7 @@ static BlockDriver bdrv_nbd = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -413,6 +411,7 @@ static BlockDriver bdrv_nbd_tcp = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
@@ -430,6 +429,7 @@ static BlockDriver bdrv_nbd_unix = {
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
+ .bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_getlength = nbd_getlength,
.bdrv_detach_aio_context = nbd_detach_aio_context,
.bdrv_attach_aio_context = nbd_attach_aio_context,
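
Note: the hunks above convert the NBD client API from passing an NbdClientSession pointer to passing the BlockDriverState and recovering the session through nbd_get_client_session(). Below is a stand-alone sketch of that opaque-state accessor pattern, using hypothetical toy types rather than QEMU's:

#include <stdio.h>
#include <stdlib.h>

typedef struct Session { int sock; } Session;

typedef struct Device { void *opaque; } Device;              /* generic handle */

typedef struct DriverState { Session client; } DriverState;  /* like BDRVNBDState */

/* Accessor mirroring nbd_get_client_session(): recover the embedded
 * session from the generic handle's opaque pointer. */
static Session *get_client_session(Device *dev)
{
    DriverState *s = dev->opaque;
    return &s->client;
}

/* Helper in the style of nbd_client_co_flush(): takes only the generic
 * handle and looks up its own state internally. */
static int client_flush(Device *dev)
{
    Session *sess = get_client_session(dev);
    printf("flush on sock %d\n", sess->sock);
    return 0;
}

int main(void)
{
    DriverState *s = calloc(1, sizeof(*s));
    Device dev = { .opaque = s };

    s->client.sock = 3;
    client_flush(&dev);
    free(s);
    return 0;
}
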
diff --git a/block/qapi.c b/block/qapi.c
index 75c388e90b..1808e67336 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -24,6 +24,7 @@
#include "block/qapi.h"
#include "block/block_int.h"
+#include "block/write-threshold.h"
#include "qmp-commands.h"
#include "qapi-visit.h"
#include "qapi/qmp-output-visitor.h"
@@ -89,6 +90,8 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs)
info->iops_size = cfg.op_size;
}
+ info->write_threshold = bdrv_write_threshold_get(bs);
+
return info;
}
@@ -335,6 +338,8 @@ static BlockStats *bdrv_query_stats(const BlockDriverState *bs,
s->stats->wr_bytes = bs->stats.nr_bytes[BLOCK_ACCT_WRITE];
s->stats->rd_operations = bs->stats.nr_ops[BLOCK_ACCT_READ];
s->stats->wr_operations = bs->stats.nr_ops[BLOCK_ACCT_WRITE];
+ s->stats->rd_merged = bs->stats.merged[BLOCK_ACCT_READ];
+ s->stats->wr_merged = bs->stats.merged[BLOCK_ACCT_WRITE];
s->stats->wr_highest_offset =
bs->stats.wr_highest_sector * BDRV_SECTOR_SIZE;
s->stats->flush_operations = bs->stats.nr_ops[BLOCK_ACCT_FLUSH];
diff --git a/block/qcow.c b/block/qcow.c
index ccbe9e0d2c..055896910e 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -215,7 +215,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
- if (len > 1023 || len > sizeof(bs->backing_file)) {
+ if (len > 1023 || len >= sizeof(bs->backing_file)) {
error_setg(errp, "Backing file name too long");
ret = -EINVAL;
goto fail;
diff --git a/block/qcow2-cache.c b/block/qcow2-cache.c
index 904f6b1f44..b1155492a3 100644
--- a/block/qcow2-cache.c
+++ b/block/qcow2-cache.c
@@ -253,7 +253,9 @@ static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
/* Give newer hits priority */
/* TODO Check how to optimize the replacement strategy */
- c->entries[i].cache_hits /= 2;
+ if (c->entries[i].cache_hits > 1) {
+ c->entries[i].cache_hits /= 2;
+ }
}
if (min_index == -1) {
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 9afdb40b40..9b80ca79ea 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -759,54 +759,54 @@ int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
BDRVQcowState *s = bs->opaque;
- int64_t offset, cluster_offset;
- int free_in_cluster;
+ int64_t offset;
+ size_t free_in_cluster;
+ int ret;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
assert(size > 0 && size <= s->cluster_size);
- if (s->free_byte_offset == 0) {
- offset = qcow2_alloc_clusters(bs, s->cluster_size);
- if (offset < 0) {
- return offset;
+ assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));
+
+ offset = s->free_byte_offset;
+
+ if (offset) {
+ int refcount = qcow2_get_refcount(bs, offset >> s->cluster_bits);
+ if (refcount < 0) {
+ return refcount;
}
- s->free_byte_offset = offset;
- }
- redo:
- free_in_cluster = s->cluster_size -
- offset_into_cluster(s, s->free_byte_offset);
- if (size <= free_in_cluster) {
- /* enough space in current cluster */
- offset = s->free_byte_offset;
- s->free_byte_offset += size;
- free_in_cluster -= size;
- if (free_in_cluster == 0)
- s->free_byte_offset = 0;
- if (offset_into_cluster(s, offset) != 0)
- qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
- QCOW2_DISCARD_NEVER);
- } else {
- offset = qcow2_alloc_clusters(bs, s->cluster_size);
- if (offset < 0) {
- return offset;
+
+ if (refcount == 0xffff) {
+ offset = 0;
}
- cluster_offset = start_of_cluster(s, s->free_byte_offset);
- if ((cluster_offset + s->cluster_size) == offset) {
- /* we are lucky: contiguous data */
- offset = s->free_byte_offset;
- qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
- QCOW2_DISCARD_NEVER);
- s->free_byte_offset += size;
- } else {
- s->free_byte_offset = offset;
- goto redo;
+ }
+
+ free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
+ if (!offset || free_in_cluster < size) {
+ int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
+ if (new_cluster < 0) {
+ return new_cluster;
+ }
+
+ if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
+ offset = new_cluster;
}
}
- /* The cluster refcount was incremented, either by qcow2_alloc_clusters()
- * or explicitly by qcow2_update_cluster_refcount(). Refcount blocks must
- * be flushed before the caller's L2 table updates.
- */
+ assert(offset);
+ ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* The cluster refcount was incremented; refcount blocks must be flushed
+ * before the caller's L2 table updates. */
qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
+
+ s->free_byte_offset = offset + size;
+ if (!offset_into_cluster(s, s->free_byte_offset)) {
+ s->free_byte_offset = 0;
+ }
+
return offset;
}
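
Note: the rewritten qcow2_alloc_bytes() keeps a byte-granularity cursor (free_byte_offset) inside a partially used cluster and falls back to a fresh cluster when the request no longer fits. A toy sketch of that allocation flow follows, with refcount updates and the contiguous-cluster optimisation left out:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE 4096

static uint64_t next_cluster = CLUSTER_SIZE;   /* toy cluster allocator */
static uint64_t free_byte_offset;              /* 0 means "no open cluster" */

static uint64_t alloc_cluster(void)
{
    uint64_t c = next_cluster;
    next_cluster += CLUSTER_SIZE;
    return c;
}

static uint64_t alloc_bytes(size_t size)
{
    uint64_t offset = free_byte_offset;
    size_t free_in_cluster;

    assert(size > 0 && size <= CLUSTER_SIZE);

    free_in_cluster = CLUSTER_SIZE - (size_t)(offset % CLUSTER_SIZE);
    if (!offset || free_in_cluster < size) {
        offset = alloc_cluster();      /* open a fresh cluster */
    }

    /* The real code updates refcounts for [offset, offset + size) here. */
    free_byte_offset = offset + size;
    if (free_byte_offset % CLUSTER_SIZE == 0) {
        free_byte_offset = 0;          /* cluster filled exactly */
    }
    return offset;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)alloc_bytes(512));
    printf("%llu\n", (unsigned long long)alloc_bytes(512));
    return 0;
}
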
diff --git a/block/qcow2.c b/block/qcow2.c
index dbaf016bc7..2ed8d95b1f 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -869,7 +869,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
- len > sizeof(bs->backing_file)) {
+ len >= sizeof(bs->backing_file)) {
error_setg(errp, "Backing file name too long");
ret = -EINVAL;
goto fail;
@@ -2521,15 +2521,12 @@ static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
{
BDRVQcowState *s = bs->opaque;
int64_t total_sectors = bs->total_sectors;
- int growable = bs->growable;
bool zero_beyond_eof = bs->zero_beyond_eof;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
- bs->growable = 1;
bs->zero_beyond_eof = false;
ret = bdrv_pwritev(bs, qcow2_vm_state_offset(s) + pos, qiov);
- bs->growable = growable;
bs->zero_beyond_eof = zero_beyond_eof;
/* bdrv_co_do_writev will have increased the total_sectors value to include
@@ -2544,15 +2541,12 @@ static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
int64_t pos, int size)
{
BDRVQcowState *s = bs->opaque;
- int growable = bs->growable;
bool zero_beyond_eof = bs->zero_beyond_eof;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
- bs->growable = 1;
bs->zero_beyond_eof = false;
ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
- bs->growable = growable;
bs->zero_beyond_eof = zero_beyond_eof;
return ret;
diff --git a/block/qed.c b/block/qed.c
index 80f18d82e2..892b13c806 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -440,6 +440,11 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
s->l2_mask = s->table_nelems - 1;
s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
+ /* Header size calculation must not overflow uint32_t */
+ if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
+ return -EINVAL;
+ }
+
if ((s->header.features & QED_F_BACKING_FILE)) {
if ((uint64_t)s->header.backing_filename_offset +
s->header.backing_filename_size >
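
Note: the new guard in bdrv_qed_open() rejects images whose header_size * cluster_size product would overflow a uint32_t by comparing against the divided limit instead of multiplying. A minimal sketch of the same check, assuming nothing beyond standard C:

#include <stdint.h>
#include <stdio.h>

static int check_header_size(uint32_t header_size, uint32_t cluster_size)
{
    /* Reject before multiplying: the product must fit in uint32_t. */
    if (cluster_size && header_size > UINT32_MAX / cluster_size) {
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", check_header_size(1, 65536));      /* 0: fits */
    printf("%d\n", check_header_size(70000, 65536));  /* -1: would overflow */
    return 0;
}
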
diff --git a/block/qed.h b/block/qed.h
index d3934a05cd..615e676fc8 100644
--- a/block/qed.h
+++ b/block/qed.h
@@ -133,7 +133,6 @@ typedef struct QEDAIOCB {
int bh_ret; /* final return status for completion bh */
QSIMPLEQ_ENTRY(QEDAIOCB) next; /* next request */
int flags; /* QED_AIOCB_* bits ORed together */
- bool *finished; /* signal for cancel completion */
uint64_t end_pos; /* request end on block device, in bytes */
/* User scatter-gather list */
diff --git a/block/raw-posix.c b/block/raw-posix.c
index e51293a455..b5f077a8f1 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -60,7 +60,7 @@
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
#endif
#endif
-#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
+#if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE)
#include <linux/falloc.h>
#endif
#if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
@@ -147,6 +147,7 @@ typedef struct BDRVRawState {
bool has_discard:1;
bool has_write_zeroes:1;
bool discard_zeroes:1;
+ bool has_fallocate;
bool needs_alignment;
} BDRVRawState;
@@ -452,6 +453,7 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
}
if (S_ISREG(st.st_mode)) {
s->discard_zeroes = true;
+ s->has_fallocate = true;
}
if (S_ISBLK(st.st_mode)) {
#ifdef BLKDISCARDZEROES
@@ -893,40 +895,108 @@ static int xfs_discard(BDRVRawState *s, int64_t offset, uint64_t bytes)
}
#endif
-static ssize_t handle_aiocb_write_zeroes(RawPosixAIOData *aiocb)
+static int translate_err(int err)
{
- int ret = -EOPNOTSUPP;
+ if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
+ err == -ENOTTY) {
+ err = -ENOTSUP;
+ }
+ return err;
+}
+
+#ifdef CONFIG_FALLOCATE
+static int do_fallocate(int fd, int mode, off_t offset, off_t len)
+{
+ do {
+ if (fallocate(fd, mode, offset, len) == 0) {
+ return 0;
+ }
+ } while (errno == EINTR);
+ return translate_err(-errno);
+}
+#endif
+
+static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
+{
+ int ret = -ENOTSUP;
BDRVRawState *s = aiocb->bs->opaque;
- if (s->has_write_zeroes == 0) {
+ if (!s->has_write_zeroes) {
return -ENOTSUP;
}
- if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
#ifdef BLKZEROOUT
- do {
- uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
- if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
- return 0;
- }
- } while (errno == EINTR);
+ do {
+ uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
+ if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
+ return 0;
+ }
+ } while (errno == EINTR);
- ret = -errno;
+ ret = translate_err(-errno);
#endif
- } else {
+
+ if (ret == -ENOTSUP) {
+ s->has_write_zeroes = false;
+ }
+ return ret;
+}
+
+static ssize_t handle_aiocb_write_zeroes(RawPosixAIOData *aiocb)
+{
+ BDRVRawState *s = aiocb->bs->opaque;
+
+ if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
+ return handle_aiocb_write_zeroes_block(aiocb);
+ }
+
#ifdef CONFIG_XFS
- if (s->is_xfs) {
- return xfs_write_zeroes(s, aiocb->aio_offset, aiocb->aio_nbytes);
+ if (s->is_xfs) {
+ return xfs_write_zeroes(s, aiocb->aio_offset, aiocb->aio_nbytes);
+ }
+#endif
+
+#ifdef CONFIG_FALLOCATE_ZERO_RANGE
+ if (s->has_write_zeroes) {
+ int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
+ aiocb->aio_offset, aiocb->aio_nbytes);
+ if (ret == 0 || ret != -ENOTSUP) {
+ return ret;
}
+ s->has_write_zeroes = false;
+ }
#endif
+
+#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
+ if (s->has_discard && s->has_fallocate) {
+ int ret = do_fallocate(s->fd,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ aiocb->aio_offset, aiocb->aio_nbytes);
+ if (ret == 0) {
+ ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
+ if (ret == 0 || ret != -ENOTSUP) {
+ return ret;
+ }
+ s->has_fallocate = false;
+ } else if (ret != -ENOTSUP) {
+ return ret;
+ } else {
+ s->has_discard = false;
+ }
}
+#endif
- if (ret == -ENODEV || ret == -ENOSYS || ret == -EOPNOTSUPP ||
- ret == -ENOTTY) {
- s->has_write_zeroes = false;
- ret = -ENOTSUP;
+#ifdef CONFIG_FALLOCATE
+ if (s->has_fallocate && aiocb->aio_offset >= bdrv_getlength(aiocb->bs)) {
+ int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
+ if (ret == 0 || ret != -ENOTSUP) {
+ return ret;
+ }
+ s->has_fallocate = false;
}
- return ret;
+#endif
+
+ return -ENOTSUP;
}
static ssize_t handle_aiocb_discard(RawPosixAIOData *aiocb)
@@ -957,21 +1027,14 @@ static ssize_t handle_aiocb_discard(RawPosixAIOData *aiocb)
#endif
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
- do {
- if (fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- aiocb->aio_offset, aiocb->aio_nbytes) == 0) {
- return 0;
- }
- } while (errno == EINTR);
-
- ret = -errno;
+ ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ aiocb->aio_offset, aiocb->aio_nbytes);
#endif
}
- if (ret == -ENODEV || ret == -ENOSYS || ret == -EOPNOTSUPP ||
- ret == -ENOTTY) {
+ ret = translate_err(ret);
+ if (ret == -ENOTSUP) {
s->has_discard = false;
- ret = -ENOTSUP;
}
return ret;
}
@@ -984,7 +1047,7 @@ static int aio_worker(void *arg)
switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
case QEMU_AIO_READ:
ret = handle_aiocb_rw(aiocb);
- if (ret >= 0 && ret < aiocb->aio_nbytes && aiocb->bs->growable) {
+ if (ret >= 0 && ret < aiocb->aio_nbytes) {
iov_memset(aiocb->aio_iov, aiocb->aio_niov, ret,
0, aiocb->aio_nbytes - ret);
@@ -1312,7 +1375,20 @@ again:
if (size == 0)
#endif
#if defined(__APPLE__) && defined(__MACH__)
- size = LLONG_MAX;
+ {
+ uint64_t sectors = 0;
+ uint32_t sector_size = 0;
+
+ if (ioctl(fd, DKIOCGETBLOCKCOUNT, &sectors) == 0
+ && ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) == 0) {
+ size = sectors * sector_size;
+ } else {
+ size = lseek(fd, 0LL, SEEK_END);
+ if (size < 0) {
+ return -errno;
+ }
+ }
+ }
#else
size = lseek(fd, 0LL, SEEK_END);
if (size < 0) {
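
Note: the raw-posix changes factor EINTR retries and errno translation into do_fallocate()/translate_err() and then try a chain of zeroing strategies, from FALLOC_FL_ZERO_RANGE down to punch-hole plus re-allocation. A minimal Linux-only sketch of that fallback chain on a plain file (hypothetical file name; not the driver code):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

/* Fold the "operation not supported" errnos into one code, as
 * translate_err() does, so the caller can move on to the next method. */
static int translate_err(int err)
{
    if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
        err == -ENOTTY) {
        err = -ENOTSUP;
    }
    return err;
}

/* Retry fallocate() on EINTR, as do_fallocate() does. */
static int do_fallocate(int fd, int mode, off_t offset, off_t len)
{
    do {
        if (fallocate(fd, mode, offset, len) == 0) {
            return 0;
        }
    } while (errno == EINTR);
    return translate_err(-errno);
}

int main(void)
{
    int fd = open("zeroes.img", O_RDWR | O_CREAT, 0644);
    int ret;

    if (fd < 0) {
        return 1;
    }

    /* Preferred: ask the filesystem to zero the range in place... */
    ret = do_fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 4096);
    if (ret == -ENOTSUP) {
        /* ...otherwise punch a hole (reads as zeroes) and re-reserve it. */
        ret = do_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                           0, 4096);
        if (ret == 0) {
            ret = do_fallocate(fd, 0, 0, 4096);
        }
    }
    printf("zeroing result: %d\n", ret);
    close(fd);
    return 0;
}
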
diff --git a/block/raw-win32.c b/block/raw-win32.c
index 06243d76df..dae5d2fee9 100644
--- a/block/raw-win32.c
+++ b/block/raw-win32.c
@@ -101,7 +101,7 @@ static int aio_worker(void *arg)
switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
case QEMU_AIO_READ:
count = handle_aiocb_rw(aiocb);
- if (count < aiocb->aio_nbytes && aiocb->bs->growable) {
+ if (count < aiocb->aio_nbytes) {
/* A short read means that we have reached EOF. Pad the buffer
* with zeros for bytes after EOF. */
iov_memset(aiocb->aio_iov, aiocb->aio_niov, count,
diff --git a/block/sheepdog.c b/block/sheepdog.c
index be3176fcb4..b320871e9c 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -1730,7 +1730,7 @@ static int sd_create(const char *filename, QemuOpts *opts,
BlockDriver *drv;
/* Currently, only Sheepdog backing image is supported. */
- drv = bdrv_find_protocol(backing_file, true);
+ drv = bdrv_find_protocol(backing_file, true, NULL);
if (!drv || strcmp(drv->protocol_name, "sheepdog") != 0) {
error_setg(errp, "backing_file must be a sheepdog image");
ret = -EINVAL;
@@ -2117,7 +2117,7 @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
int64_t offset = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE;
BDRVSheepdogState *s = bs->opaque;
- if (bs->growable && offset > s->inode.vdi_size) {
+ if (offset > s->inode.vdi_size) {
ret = sd_truncate(bs, offset);
if (ret < 0) {
return ret;
diff --git a/block/vmdk.c b/block/vmdk.c
index 7d079adc4a..8410a158a2 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -843,8 +843,7 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
}
extent_path = g_malloc0(PATH_MAX);
- path_combine(extent_path, sizeof(extent_path),
- desc_file_path, fname);
+ path_combine(extent_path, PATH_MAX, desc_file_path, fname);
extent_file = NULL;
ret = bdrv_open(&extent_file, extent_path, NULL, NULL,
bs->open_flags | BDRV_O_PROTOCOL, NULL, errp);
diff --git a/block/write-threshold.c b/block/write-threshold.c
new file mode 100644
index 0000000000..c2cd517716
--- /dev/null
+++ b/block/write-threshold.c
@@ -0,0 +1,125 @@
+/*
+ * QEMU System Emulator block write threshold notification
+ *
+ * Copyright Red Hat, Inc. 2014
+ *
+ * Authors:
+ * Francesco Romani <fromani@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#include "block/block_int.h"
+#include "block/coroutine.h"
+#include "block/write-threshold.h"
+#include "qemu/notify.h"
+#include "qapi-event.h"
+#include "qmp-commands.h"
+
+
+uint64_t bdrv_write_threshold_get(const BlockDriverState *bs)
+{
+ return bs->write_threshold_offset;
+}
+
+bool bdrv_write_threshold_is_set(const BlockDriverState *bs)
+{
+ return bs->write_threshold_offset > 0;
+}
+
+static void write_threshold_disable(BlockDriverState *bs)
+{
+ if (bdrv_write_threshold_is_set(bs)) {
+ notifier_with_return_remove(&bs->write_threshold_notifier);
+ bs->write_threshold_offset = 0;
+ }
+}
+
+uint64_t bdrv_write_threshold_exceeded(const BlockDriverState *bs,
+ const BdrvTrackedRequest *req)
+{
+ if (bdrv_write_threshold_is_set(bs)) {
+ if (req->offset > bs->write_threshold_offset) {
+ return (req->offset - bs->write_threshold_offset) + req->bytes;
+ }
+ if ((req->offset + req->bytes) > bs->write_threshold_offset) {
+ return (req->offset + req->bytes) - bs->write_threshold_offset;
+ }
+ }
+ return 0;
+}
+
+static int coroutine_fn before_write_notify(NotifierWithReturn *notifier,
+ void *opaque)
+{
+ BdrvTrackedRequest *req = opaque;
+ BlockDriverState *bs = req->bs;
+ uint64_t amount = 0;
+
+ amount = bdrv_write_threshold_exceeded(bs, req);
+ if (amount > 0) {
+ qapi_event_send_block_write_threshold(
+ bs->node_name,
+ amount,
+ bs->write_threshold_offset,
+ &error_abort);
+
+ /* autodisable to avoid flooding the monitor */
+ write_threshold_disable(bs);
+ }
+
+ return 0; /* should always let other notifiers run */
+}
+
+static void write_threshold_register_notifier(BlockDriverState *bs)
+{
+ bs->write_threshold_notifier.notify = before_write_notify;
+ notifier_with_return_list_add(&bs->before_write_notifiers,
+ &bs->write_threshold_notifier);
+}
+
+static void write_threshold_update(BlockDriverState *bs,
+ int64_t threshold_bytes)
+{
+ bs->write_threshold_offset = threshold_bytes;
+}
+
+void bdrv_write_threshold_set(BlockDriverState *bs, uint64_t threshold_bytes)
+{
+ if (bdrv_write_threshold_is_set(bs)) {
+ if (threshold_bytes > 0) {
+ write_threshold_update(bs, threshold_bytes);
+ } else {
+ write_threshold_disable(bs);
+ }
+ } else {
+ if (threshold_bytes > 0) {
+ /* avoid multiple registration */
+ write_threshold_register_notifier(bs);
+ write_threshold_update(bs, threshold_bytes);
+ }
+ /* discard bogus disable request */
+ }
+}
+
+void qmp_block_set_write_threshold(const char *node_name,
+ uint64_t threshold_bytes,
+ Error **errp)
+{
+ BlockDriverState *bs;
+ AioContext *aio_context;
+
+ bs = bdrv_find_node(node_name);
+ if (!bs) {
+ error_set(errp, QERR_DEVICE_NOT_FOUND, node_name);
+ return;
+ }
+
+ aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(aio_context);
+
+ bdrv_write_threshold_set(bs, threshold_bytes);
+
+ aio_context_release(aio_context);
+}
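
Note: bdrv_write_threshold_exceeded() above reports how many bytes of a write land at or beyond the configured threshold. The same arithmetic in isolation, as a small stand-alone program:

#include <stdint.h>
#include <stdio.h>

static uint64_t threshold_exceeded(uint64_t threshold,
                                   uint64_t offset, uint64_t bytes)
{
    if (threshold == 0) {
        return 0;                               /* feature disabled */
    }
    if (offset > threshold) {
        return (offset - threshold) + bytes;    /* whole request is past it */
    }
    if (offset + bytes > threshold) {
        return (offset + bytes) - threshold;    /* only the tail crosses it */
    }
    return 0;
}

int main(void)
{
    /* 1 MiB threshold, 64 KiB write that starts 4 KiB below it. */
    uint64_t excess = threshold_exceeded(1ULL << 20,
                                         (1ULL << 20) - 4096, 64 * 1024);
    printf("%llu bytes beyond the threshold\n", (unsigned long long)excess);
    return 0;
}
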
diff --git a/blockdev.c b/blockdev.c
index d59efd3f15..6eedcf56f8 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -354,13 +354,11 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
ThrottleConfig cfg;
int snapshot = 0;
bool copy_on_read;
- int ret;
Error *error = NULL;
QemuOpts *opts;
const char *id;
bool has_driver_specific_opts;
BlockdevDetectZeroesOptions detect_zeroes;
- BlockDriver *drv = NULL;
/* Check common options by copying from bs_opts to opts, all other options
* stay in bs_opts for processing by bdrv_open(). */
@@ -426,11 +424,11 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
goto early_err;
}
- drv = bdrv_find_format(buf);
- if (!drv) {
- error_setg(errp, "'%s' invalid format", buf);
+ if (qdict_haskey(bs_opts, "driver")) {
+ error_setg(errp, "Cannot specify both 'driver' and 'format'");
goto early_err;
}
+ qdict_put(bs_opts, "driver", qstring_from_str(buf));
}
/* disk I/O throttling */
@@ -505,70 +503,64 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
}
/* init */
- blk = blk_new_with_bs(qemu_opts_id(opts), errp);
- if (!blk) {
- goto early_err;
- }
- bs = blk_bs(blk);
- bs->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0;
- bs->read_only = ro;
- bs->detect_zeroes = detect_zeroes;
+ if ((!file || !*file) && !has_driver_specific_opts) {
+ blk = blk_new_with_bs(qemu_opts_id(opts), errp);
+ if (!blk) {
+ goto early_err;
+ }
- bdrv_set_on_error(bs, on_read_error, on_write_error);
+ bs = blk_bs(blk);
+ bs->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0;
+ bs->read_only = ro;
- /* disk I/O throttling */
- if (throttle_enabled(&cfg)) {
- bdrv_io_limits_enable(bs);
- bdrv_set_io_limits(bs, &cfg);
- }
-
- if (!file || !*file) {
- if (has_driver_specific_opts) {
+ QDECREF(bs_opts);
+ } else {
+ if (file && !*file) {
file = NULL;
- } else {
- QDECREF(bs_opts);
- qemu_opts_del(opts);
- return blk;
}
- }
- if (snapshot) {
- /* always use cache=unsafe with snapshot */
- bdrv_flags &= ~BDRV_O_CACHE_MASK;
- bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH);
- }
- if (copy_on_read) {
- bdrv_flags |= BDRV_O_COPY_ON_READ;
- }
+ if (snapshot) {
+ /* always use cache=unsafe with snapshot */
+ bdrv_flags &= ~BDRV_O_CACHE_MASK;
+ bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH);
+ }
- if (runstate_check(RUN_STATE_INMIGRATE)) {
- bdrv_flags |= BDRV_O_INCOMING;
+ if (copy_on_read) {
+ bdrv_flags |= BDRV_O_COPY_ON_READ;
+ }
+
+ if (runstate_check(RUN_STATE_INMIGRATE)) {
+ bdrv_flags |= BDRV_O_INCOMING;
+ }
+
+ bdrv_flags |= ro ? 0 : BDRV_O_RDWR;
+
+ blk = blk_new_open(qemu_opts_id(opts), file, NULL, bs_opts, bdrv_flags,
+ errp);
+ if (!blk) {
+ goto err_no_bs_opts;
+ }
+ bs = blk_bs(blk);
}
- bdrv_flags |= ro ? 0 : BDRV_O_RDWR;
+ bs->detect_zeroes = detect_zeroes;
- QINCREF(bs_opts);
- ret = bdrv_open(&bs, file, NULL, bs_opts, bdrv_flags, drv, &error);
- assert(bs == blk_bs(blk));
+ bdrv_set_on_error(bs, on_read_error, on_write_error);
- if (ret < 0) {
- error_setg(errp, "could not open disk image %s: %s",
- file ?: blk_name(blk), error_get_pretty(error));
- error_free(error);
- goto err;
+ /* disk I/O throttling */
+ if (throttle_enabled(&cfg)) {
+ bdrv_io_limits_enable(bs);
+ bdrv_set_io_limits(bs, &cfg);
}
if (bdrv_key_required(bs)) {
autostart = 0;
}
- QDECREF(bs_opts);
+err_no_bs_opts:
qemu_opts_del(opts);
-
return blk;
-err:
- blk_unref(blk);
early_err:
qemu_opts_del(opts);
err_no_opts:
@@ -1793,7 +1785,6 @@ void qmp_block_passwd(bool has_device, const char *device,
Error *local_err = NULL;
BlockDriverState *bs;
AioContext *aio_context;
- int err;
bs = bdrv_lookup_bs(has_device ? device : NULL,
has_node_name ? node_name : NULL,
@@ -1806,16 +1797,8 @@ void qmp_block_passwd(bool has_device, const char *device,
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
- err = bdrv_set_key(bs, password);
- if (err == -EINVAL) {
- error_set(errp, QERR_DEVICE_NOT_ENCRYPTED, bdrv_get_device_name(bs));
- goto out;
- } else if (err < 0) {
- error_set(errp, QERR_INVALID_PASSWORD);
- goto out;
- }
+ bdrv_add_key(bs, password, errp);
-out:
aio_context_release(aio_context);
}
@@ -1833,18 +1816,7 @@ static void qmp_bdrv_open_encrypted(BlockDriverState *bs, const char *filename,
return;
}
- if (bdrv_key_required(bs)) {
- if (password) {
- if (bdrv_set_key(bs, password) < 0) {
- error_set(errp, QERR_INVALID_PASSWORD);
- }
- } else {
- error_set(errp, QERR_DEVICE_ENCRYPTED, bdrv_get_device_name(bs),
- bdrv_get_encrypted_filename(bs));
- }
- } else if (password) {
- error_set(errp, QERR_DEVICE_NOT_ENCRYPTED, bdrv_get_device_name(bs));
- }
+ bdrv_add_key(bs, password, errp);
}
void qmp_change_blockdev(const char *device, const char *filename,
@@ -2653,7 +2625,8 @@ out:
}
/* Get the block job for a given device name and acquire its AioContext */
-static BlockJob *find_block_job(const char *device, AioContext **aio_context)
+static BlockJob *find_block_job(const char *device, AioContext **aio_context,
+ Error **errp)
{
BlockDriverState *bs;
@@ -2673,6 +2646,8 @@ static BlockJob *find_block_job(const char *device, AioContext **aio_context)
return bs->job;
notfound:
+ error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
+ "No active block job on device '%s'", device);
*aio_context = NULL;
return NULL;
}
@@ -2680,10 +2655,9 @@ notfound:
void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
{
AioContext *aio_context;
- BlockJob *job = find_block_job(device, &aio_context);
+ BlockJob *job = find_block_job(device, &aio_context, errp);
if (!job) {
- error_set(errp, QERR_BLOCK_JOB_NOT_ACTIVE, device);
return;
}
@@ -2695,10 +2669,9 @@ void qmp_block_job_cancel(const char *device,
bool has_force, bool force, Error **errp)
{
AioContext *aio_context;
- BlockJob *job = find_block_job(device, &aio_context);
+ BlockJob *job = find_block_job(device, &aio_context, errp);
if (!job) {
- error_set(errp, QERR_BLOCK_JOB_NOT_ACTIVE, device);
return;
}
@@ -2721,10 +2694,9 @@ out:
void qmp_block_job_pause(const char *device, Error **errp)
{
AioContext *aio_context;
- BlockJob *job = find_block_job(device, &aio_context);
+ BlockJob *job = find_block_job(device, &aio_context, errp);
if (!job) {
- error_set(errp, QERR_BLOCK_JOB_NOT_ACTIVE, device);
return;
}
@@ -2736,10 +2708,9 @@ void qmp_block_job_pause(const char *device, Error **errp)
void qmp_block_job_resume(const char *device, Error **errp)
{
AioContext *aio_context;
- BlockJob *job = find_block_job(device, &aio_context);
+ BlockJob *job = find_block_job(device, &aio_context, errp);
if (!job) {
- error_set(errp, QERR_BLOCK_JOB_NOT_ACTIVE, device);
return;
}
@@ -2751,10 +2722,9 @@ void qmp_block_job_resume(const char *device, Error **errp)
void qmp_block_job_complete(const char *device, Error **errp)
{
AioContext *aio_context;
- BlockJob *job = find_block_job(device, &aio_context);
+ BlockJob *job = find_block_job(device, &aio_context, errp);
if (!job) {
- error_set(errp, QERR_BLOCK_JOB_NOT_ACTIVE, device);
return;
}
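
Note: find_block_job() now sets the error itself, so qmp_block_job_pause() and the other callers simply return when it fails. A toy illustration of that pattern (hypothetical error plumbing, not QEMU's Error API):

#include <stdio.h>
#include <string.h>

struct job { const char *device; };

static struct job jobs[] = { { "drive0" } };

/* The lookup reports its own failure, so callers no longer repeat the
 * same error-reporting call. */
static struct job *find_job(const char *device, const char **errmsg)
{
    for (size_t i = 0; i < sizeof(jobs) / sizeof(jobs[0]); i++) {
        if (strcmp(jobs[i].device, device) == 0) {
            return &jobs[i];
        }
    }
    *errmsg = "No active block job on device";
    return NULL;
}

static void qmp_job_pause(const char *device)
{
    const char *err = NULL;
    struct job *job = find_job(device, &err);

    if (!job) {               /* just return; the error is already set */
        printf("%s '%s'\n", err, device);
        return;
    }
    printf("pausing job on %s\n", job->device);
}

int main(void)
{
    qmp_job_pause("drive0");
    qmp_job_pause("drive1");
    return 0;
}
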
diff --git a/configure b/configure
index f185dd0838..7ba4bcb866 100755
--- a/configure
+++ b/configure
@@ -313,6 +313,7 @@ glx=""
zlib="yes"
lzo=""
snappy=""
+bzip2=""
guest_agent=""
guest_agent_with_vss="no"
vss_win32_sdk=""
@@ -1060,6 +1061,10 @@ for opt do
;;
--enable-snappy) snappy="yes"
;;
+ --disable-bzip2) bzip2="no"
+ ;;
+ --enable-bzip2) bzip2="yes"
+ ;;
--enable-guest-agent) guest_agent="yes"
;;
--disable-guest-agent) guest_agent="no"
@@ -1374,6 +1379,8 @@ Advanced options (experts only):
--enable-usb-redir enable usb network redirection support
--enable-lzo enable the support of lzo compression library
--enable-snappy enable the support of snappy compression library
+ --enable-bzip2 enable the support of bzip2 compression library (for
+ reading bzip2-compressed dmg images)
--disable-guest-agent disable building of the QEMU Guest Agent
--enable-guest-agent enable building of the QEMU Guest Agent
--with-vss-sdk=SDK-path enable Windows VSS support in QEMU Guest Agent
@@ -1820,6 +1827,24 @@ EOF
fi
##########################################
+# bzip2 check
+
+if test "$bzip2" != "no" ; then
+ cat > $TMPC << EOF
+#include <bzlib.h>
+int main(void) { BZ2_bzlibVersion(); return 0; }
+EOF
+ if compile_prog "" "-lbz2" ; then
+ bzip2="yes"
+ else
+ if test "$bzip2" = "yes"; then
+ feature_not_found "libbzip2" "Install libbzip2 devel"
+ fi
+ bzip2="no"
+ fi
+fi
+
+##########################################
# libseccomp check
if test "$seccomp" != "no" ; then
@@ -3335,6 +3360,22 @@ if compile_prog "" "" ; then
fallocate_punch_hole=yes
fi
+# check that fallocate supports range zeroing inside the file
+fallocate_zero_range=no
+cat > $TMPC << EOF
+#include <fcntl.h>
+#include <linux/falloc.h>
+
+int main(void)
+{
+ fallocate(0, FALLOC_FL_ZERO_RANGE, 0, 0);
+ return 0;
+}
+EOF
+if compile_prog "" "" ; then
+ fallocate_zero_range=yes
+fi
+
# check for posix_fallocate
posix_fallocate=no
cat > $TMPC << EOF
@@ -4369,6 +4410,7 @@ echo "vhdx $vhdx"
echo "Quorum $quorum"
echo "lzo support $lzo"
echo "snappy support $snappy"
+echo "bzip2 support $bzip2"
echo "NUMA host support $numa"
if test "$sdl_too_old" = "yes"; then
@@ -4567,6 +4609,9 @@ fi
if test "$fallocate_punch_hole" = "yes" ; then
echo "CONFIG_FALLOCATE_PUNCH_HOLE=y" >> $config_host_mak
fi
+if test "$fallocate_zero_range" = "yes" ; then
+ echo "CONFIG_FALLOCATE_ZERO_RANGE=y" >> $config_host_mak
+fi
if test "$posix_fallocate" = "yes" ; then
echo "CONFIG_POSIX_FALLOCATE=y" >> $config_host_mak
fi
@@ -4724,6 +4769,11 @@ if test "$snappy" = "yes" ; then
echo "CONFIG_SNAPPY=y" >> $config_host_mak
fi
+if test "$bzip2" = "yes" ; then
+ echo "CONFIG_BZIP2=y" >> $config_host_mak
+ echo "BZIP2_LIBS=-lbz2" >> $config_host_mak
+fi
+
if test "$libiscsi" = "yes" ; then
echo "CONFIG_LIBISCSI=m" >> $config_host_mak
echo "LIBISCSI_CFLAGS=$libiscsi_cflags" >> $config_host_mak
diff --git a/cpu-exec.c b/cpu-exec.c
index adb939a994..2ffeb6e40d 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -526,28 +526,22 @@ int cpu_exec(CPUArchState *env)
* interrupt_request) which we will handle
* next time around the loop.
*/
- tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
next_tb = 0;
break;
case TB_EXIT_ICOUNT_EXPIRED:
{
/* Instruction counter expired. */
- int insns_left;
- tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
- insns_left = cpu->icount_decr.u32;
+ int insns_left = cpu->icount_decr.u32;
if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
cpu->icount_extra += insns_left;
- if (cpu->icount_extra > 0xffff) {
- insns_left = 0xffff;
- } else {
- insns_left = cpu->icount_extra;
- }
+ insns_left = MIN(0xffff, cpu->icount_extra);
cpu->icount_extra -= insns_left;
cpu->icount_decr.u16.low = insns_left;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
+ tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
cpu_exec_nocache(env, insns_left, tb);
align_clocks(&sc, cpu);
}
diff --git a/cpus.c b/cpus.c
index b826fac09c..1cd9867893 100644
--- a/cpus.c
+++ b/cpus.c
@@ -361,15 +361,19 @@ static void icount_warp_rt(void *opaque)
void qtest_clock_warp(int64_t dest)
{
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ AioContext *aio_context;
assert(qtest_enabled());
+ aio_context = qemu_get_aio_context();
while (clock < dest) {
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
+
seqlock_write_lock(&timers_state.vm_clock_seqlock);
timers_state.qemu_icount_bias += warp;
seqlock_write_unlock(&timers_state.vm_clock_seqlock);
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
+ timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
diff --git a/default-configs/alpha-softmmu.mak b/default-configs/alpha-softmmu.mak
index bc07600f96..7f6161eba9 100644
--- a/default-configs/alpha-softmmu.mak
+++ b/default-configs/alpha-softmmu.mak
@@ -5,8 +5,6 @@ include usb.mak
CONFIG_SERIAL=y
CONFIG_I8254=y
CONFIG_PCKBD=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_VGA_CIRRUS=y
CONFIG_IDE_CORE=y
CONFIG_IDE_QDEV=y
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
index f3513fa124..7671ee278a 100644
--- a/default-configs/arm-softmmu.mak
+++ b/default-configs/arm-softmmu.mak
@@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
CONFIG_VERSATILE_PCI=y
CONFIG_VERSATILE_I2C=y
+CONFIG_PCI_GENERIC=y
+
CONFIG_SDHCI=y
CONFIG_INTEGRATOR_DEBUG=y
diff --git a/default-configs/i386-softmmu.mak b/default-configs/i386-softmmu.mak
index 8e08841760..bd99af9942 100644
--- a/default-configs/i386-softmmu.mak
+++ b/default-configs/i386-softmmu.mak
@@ -3,9 +3,7 @@
include pci.mak
include sound.mak
include usb.mak
-CONFIG_VGA=y
CONFIG_QXL=$(CONFIG_SPICE)
-CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_CIRRUS=y
CONFIG_VMWARE_VGA=y
diff --git a/default-configs/mips-softmmu.mak b/default-configs/mips-softmmu.mak
index 2a80b04dc0..cce2c8116b 100644
--- a/default-configs/mips-softmmu.mak
+++ b/default-configs/mips-softmmu.mak
@@ -4,8 +4,6 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y
diff --git a/default-configs/mips64-softmmu.mak b/default-configs/mips64-softmmu.mak
index f1f933bc44..7a88a08adc 100644
--- a/default-configs/mips64-softmmu.mak
+++ b/default-configs/mips64-softmmu.mak
@@ -4,8 +4,6 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y
diff --git a/default-configs/mips64el-softmmu.mak b/default-configs/mips64el-softmmu.mak
index 317b151361..095de43186 100644
--- a/default-configs/mips64el-softmmu.mak
+++ b/default-configs/mips64el-softmmu.mak
@@ -4,8 +4,6 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y
diff --git a/default-configs/mipsel-softmmu.mak b/default-configs/mipsel-softmmu.mak
index 7708185f6b..0e25108b7f 100644
--- a/default-configs/mipsel-softmmu.mak
+++ b/default-configs/mipsel-softmmu.mak
@@ -4,8 +4,6 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_ESP=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_ISA_MM=y
CONFIG_VGA_CIRRUS=y
diff --git a/default-configs/pci.mak b/default-configs/pci.mak
index 030cdc7d3d..bea6b01553 100644
--- a/default-configs/pci.mak
+++ b/default-configs/pci.mak
@@ -33,3 +33,5 @@ CONFIG_NVME_PCI=y
CONFIG_SD=y
CONFIG_SDHCI=y
CONFIG_EDU=y
+CONFIG_VGA=y
+CONFIG_VGA_PCI=y
diff --git a/default-configs/ppc-softmmu.mak b/default-configs/ppc-softmmu.mak
index d725b23312..aebfab92eb 100644
--- a/default-configs/ppc-softmmu.mak
+++ b/default-configs/ppc-softmmu.mak
@@ -6,8 +6,6 @@ include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_ESCC=y
CONFIG_M48T59=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_PARALLEL=y
CONFIG_I8254=y
diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak
index bd30d69347..f195a8721a 100644
--- a/default-configs/ppc64-softmmu.mak
+++ b/default-configs/ppc64-softmmu.mak
@@ -6,8 +6,6 @@ include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_ESCC=y
CONFIG_M48T59=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_PARALLEL=y
CONFIG_I8254=y
diff --git a/default-configs/ppcemb-softmmu.mak b/default-configs/ppcemb-softmmu.mak
index e032761457..a1b3d5f35d 100644
--- a/default-configs/ppcemb-softmmu.mak
+++ b/default-configs/ppcemb-softmmu.mak
@@ -4,8 +4,6 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_M48T59=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_I8257=y
CONFIG_OPENPIC=y
diff --git a/default-configs/sparc64-softmmu.mak b/default-configs/sparc64-softmmu.mak
index 299c97b715..123bb993ef 100644
--- a/default-configs/sparc64-softmmu.mak
+++ b/default-configs/sparc64-softmmu.mak
@@ -5,8 +5,6 @@ include usb.mak
CONFIG_ISA_MMIO=y
CONFIG_M48T59=y
CONFIG_PTIMER=y
-CONFIG_VGA=y
-CONFIG_VGA_PCI=y
CONFIG_SERIAL=y
CONFIG_PARALLEL=y
CONFIG_PCKBD=y
diff --git a/default-configs/x86_64-softmmu.mak b/default-configs/x86_64-softmmu.mak
index 66557ac590..e7c273417b 100644
--- a/default-configs/x86_64-softmmu.mak
+++ b/default-configs/x86_64-softmmu.mak
@@ -3,9 +3,7 @@
include pci.mak
include sound.mak
include usb.mak
-CONFIG_VGA=y
CONFIG_QXL=$(CONFIG_SPICE)
-CONFIG_VGA_PCI=y
CONFIG_VGA_ISA=y
CONFIG_VGA_CIRRUS=y
CONFIG_VMWARE_VGA=y
diff --git a/disas/sh4.c b/disas/sh4.c
index f6cadd55c0..020f5ebb7b 100644
--- a/disas/sh4.c
+++ b/disas/sh4.c
@@ -332,7 +332,7 @@ typedef struct
#ifdef DEFINE_TABLE
-const sh_opcode_info sh_table[] =
+static const sh_opcode_info sh_table[] =
{
/* 0111nnnni8*1.... add #<imm>,<REG_N> */{"add",{A_IMM,A_REG_N},{HEX_7,REG_N,IMM0_8}, arch_sh1_up},
diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
index cd291d32f2..c1da2d78e7 100644
--- a/fsdev/virtfs-proxy-helper.c
+++ b/fsdev/virtfs-proxy-helper.c
@@ -749,24 +749,29 @@ static int proxy_socket(const char *path, uid_t uid, gid_t gid)
if (bind(sock, (struct sockaddr *)&proxy,
sizeof(struct sockaddr_un)) < 0) {
do_perror("bind");
- return -1;
+ goto error;
}
if (chown(proxy.sun_path, uid, gid) < 0) {
do_perror("chown");
- return -1;
+ goto error;
}
if (listen(sock, 1) < 0) {
do_perror("listen");
- return -1;
+ goto error;
}
size = sizeof(qemu);
client = accept(sock, (struct sockaddr *)&qemu, &size);
if (client < 0) {
do_perror("accept");
- return -1;
+ goto error;
}
+ close(sock);
return client;
+
+error:
+ close(sock);
+ return -1;
}
static void usage(char *prog)
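
Note: the virtfs-proxy-helper change routes every post-socket() failure through one error label so the listening descriptor is always closed. A minimal sketch of that goto-cleanup idiom (hypothetical socket path; not the helper's code):

#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/* Every failure after socket() jumps to one label that closes the fd,
 * so no path can leak it. */
static int make_listener(const char *path)
{
    struct sockaddr_un addr = { .sun_family = AF_UNIX };
    int sock = socket(AF_UNIX, SOCK_STREAM, 0);

    if (sock < 0) {
        return -1;
    }

    snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", path);
    if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind");
        goto error;
    }
    if (listen(sock, 1) < 0) {
        perror("listen");
        goto error;
    }
    return sock;

error:
    close(sock);              /* released exactly once on any failure */
    return -1;
}

int main(void)
{
    int fd = make_listener("/tmp/example-proxy.sock");

    if (fd >= 0) {
        close(fd);
        unlink("/tmp/example-proxy.sock");
    }
    return 0;
}
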
diff --git a/hmp.c b/hmp.c
index a42c5c07be..3825b296d2 100644
--- a/hmp.c
+++ b/hmp.c
@@ -16,6 +16,7 @@
#include "hmp.h"
#include "net/net.h"
#include "sysemu/char.h"
+#include "sysemu/block-backend.h"
#include "qemu/option.h"
#include "qemu/timer.h"
#include "qmp-commands.h"
@@ -474,6 +475,8 @@ void hmp_info_blockstats(Monitor *mon, const QDict *qdict)
" wr_total_time_ns=%" PRId64
" rd_total_time_ns=%" PRId64
" flush_total_time_ns=%" PRId64
+ " rd_merged=%" PRId64
+ " wr_merged=%" PRId64
"\n",
stats->value->stats->rd_bytes,
stats->value->stats->wr_bytes,
@@ -482,7 +485,9 @@ void hmp_info_blockstats(Monitor *mon, const QDict *qdict)
stats->value->stats->flush_operations,
stats->value->stats->wr_total_time_ns,
stats->value->stats->rd_total_time_ns,
- stats->value->stats->flush_total_time_ns);
+ stats->value->stats->flush_total_time_ns,
+ stats->value->stats->rd_merged,
+ stats->value->stats->wr_merged);
}
qapi_free_BlockStatsList(stats_list);
@@ -1716,14 +1721,14 @@ void hmp_chardev_remove(Monitor *mon, const QDict *qdict)
void hmp_qemu_io(Monitor *mon, const QDict *qdict)
{
- BlockDriverState *bs;
+ BlockBackend *blk;
const char* device = qdict_get_str(qdict, "device");
const char* command = qdict_get_str(qdict, "command");
Error *err = NULL;
- bs = bdrv_find(device);
- if (bs) {
- qemuio_command(bs, command);
+ blk = blk_by_name(device);
+ if (blk) {
+ qemuio_command(blk, command);
} else {
error_set(&err, QERR_DEVICE_NOT_FOUND, device);
}
diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c
index 53100061d2..62af946105 100644
--- a/hw/alpha/typhoon.c
+++ b/hw/alpha/typhoon.c
@@ -920,7 +920,7 @@ PCIBus *typhoon_init(ram_addr_t ram_size, ISABus **isa_bus,
{
qemu_irq isa_pci_irq, *isa_irqs;
- *isa_bus = isa_bus_new(NULL, &s->pchip.reg_io);
+ *isa_bus = isa_bus_new(NULL, get_system_memory(), &s->pchip.reg_io);
isa_pci_irq = *qemu_allocate_irqs(typhoon_set_isa_irq, s, 1);
isa_irqs = i8259_init(*isa_bus, isa_pci_irq);
isa_bus_irqs(*isa_bus, isa_irqs);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 34d9379032..69f51ac0da 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -42,6 +42,7 @@
#include "exec/address-spaces.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
+#include "hw/pci-host/gpex.h"
#define NUM_VIRTIO_TRANSPORTS 32
@@ -69,6 +70,7 @@ enum {
VIRT_MMIO,
VIRT_RTC,
VIRT_FW_CFG,
+ VIRT_PCIE,
};
typedef struct MemMapEntry {
@@ -129,13 +131,21 @@ static const MemMapEntry a15memmap[] = {
[VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
[VIRT_MMIO] = { 0x0a000000, 0x00000200 },
/* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
- /* 0x10000000 .. 0x40000000 reserved for PCI */
+ /*
+ * PCIE verbose map:
+ *
+ * MMIO window { 0x10000000, 0x2eff0000 },
+ * PIO window { 0x3eff0000, 0x00010000 },
+ * ECAM { 0x3f000000, 0x01000000 },
+ */
+ [VIRT_PCIE] = { 0x10000000, 0x30000000 },
[VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
};
static const int a15irqmap[] = {
[VIRT_UART] = 1,
[VIRT_RTC] = 2,
+ [VIRT_PCIE] = 3, /* ... to 6 */
[VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
};
@@ -312,7 +322,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
}
}
-static void fdt_add_gic_node(const VirtBoardInfo *vbi)
+static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
{
uint32_t gic_phandle;
@@ -331,9 +341,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
2, vbi->memmap[VIRT_GIC_CPU].base,
2, vbi->memmap[VIRT_GIC_CPU].size);
qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
+
+ return gic_phandle;
}
-static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
+static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
{
/* We create a standalone GIC v2 */
DeviceState *gicdev;
@@ -380,7 +392,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
pic[i] = qdev_get_gpio_in(gicdev, i);
}
- fdt_add_gic_node(vbi);
+ return fdt_add_gic_node(vbi);
}
static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
@@ -585,6 +597,119 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
g_free(nodename);
}
+static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
+ int first_irq, const char *nodename)
+{
+ int devfn, pin;
+ uint32_t full_irq_map[4 * 4 * 8] = { 0 };
+ uint32_t *irq_map = full_irq_map;
+
+ for (devfn = 0; devfn <= 0x18; devfn += 0x8) {
+ for (pin = 0; pin < 4; pin++) {
+ int irq_type = GIC_FDT_IRQ_TYPE_SPI;
+ int irq_nr = first_irq + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
+ int irq_level = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
+ int i;
+
+ uint32_t map[] = {
+ devfn << 8, 0, 0, /* devfn */
+ pin + 1, /* PCI pin */
+ gic_phandle, irq_type, irq_nr, irq_level }; /* GIC irq */
+
+ /* Convert map to big endian */
+ for (i = 0; i < 8; i++) {
+ irq_map[i] = cpu_to_be32(map[i]);
+ }
+ irq_map += 8;
+ }
+ }
+
+ qemu_fdt_setprop(vbi->fdt, nodename, "interrupt-map",
+ full_irq_map, sizeof(full_irq_map));
+
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
+ 0x1800, 0, 0, /* devfn (PCI_SLOT(3)) */
+ 0x7 /* PCI irq */);
+}
+
+static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
+ uint32_t gic_phandle)
+{
+ hwaddr base = vbi->memmap[VIRT_PCIE].base;
+ hwaddr size = vbi->memmap[VIRT_PCIE].size;
+ hwaddr end = base + size;
+ hwaddr size_mmio;
+ hwaddr size_ioport = 64 * 1024;
+ int nr_pcie_buses = 16;
+ hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN * nr_pcie_buses;
+ hwaddr base_mmio = base;
+ hwaddr base_ioport;
+ hwaddr base_ecam;
+ int irq = vbi->irqmap[VIRT_PCIE];
+ MemoryRegion *mmio_alias;
+ MemoryRegion *mmio_reg;
+ MemoryRegion *ecam_alias;
+ MemoryRegion *ecam_reg;
+ DeviceState *dev;
+ char *nodename;
+ int i;
+
+ base_ecam = QEMU_ALIGN_DOWN(end - size_ecam, size_ecam);
+ base_ioport = QEMU_ALIGN_DOWN(base_ecam - size_ioport, size_ioport);
+ size_mmio = base_ioport - base;
+
+ dev = qdev_create(NULL, TYPE_GPEX_HOST);
+ qdev_init_nofail(dev);
+
+ /* Map only the first size_ecam bytes of ECAM space */
+ ecam_alias = g_new0(MemoryRegion, 1);
+ ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
+ memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
+ ecam_reg, 0, size_ecam);
+ memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias);
+
+ /* Map the MMIO window into system address space so as to expose
+ * the section of PCI MMIO space which starts at the same base address
+ * (ie 1:1 mapping for that part of PCI MMIO space visible through
+ * the window).
+ */
+ mmio_alias = g_new0(MemoryRegion, 1);
+ mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
+ memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
+ mmio_reg, base_mmio, size_mmio);
+ memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
+
+ /* Map IO port space */
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
+
+ for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
+ }
+
+ nodename = g_strdup_printf("/pcie@%" PRIx64, base);
+ qemu_fdt_add_subnode(vbi->fdt, nodename);
+ qemu_fdt_setprop_string(vbi->fdt, nodename,
+ "compatible", "pci-host-ecam-generic");
+ qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0,
+ nr_pcie_buses - 1);
+
+ qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ 2, base_ecam, 2, size_ecam);
+ qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
+ 1, FDT_PCI_RANGE_IOPORT, 2, 0,
+ 2, base_ioport, 2, size_ioport,
+ 1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
+ 2, base_mmio, 2, size_mmio);
+
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
+ create_pcie_irq_map(vbi, gic_phandle, irq, nodename);
+
+ g_free(nodename);
+}
+
static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
@@ -602,15 +727,20 @@ static void machvirt_init(MachineState *machine)
MemoryRegion *ram = g_new(MemoryRegion, 1);
const char *cpu_model = machine->cpu_model;
VirtBoardInfo *vbi;
+ uint32_t gic_phandle;
+ char **cpustr;
if (!cpu_model) {
cpu_model = "cortex-a15";
}
- vbi = find_machine_info(cpu_model);
+ /* Separate the actual CPU model name from any appended features */
+ cpustr = g_strsplit(cpu_model, ",", 2);
+
+ vbi = find_machine_info(cpustr[0]);
if (!vbi) {
- error_report("mach-virt: CPU %s not supported", cpu_model);
+ error_report("mach-virt: CPU %s not supported", cpustr[0]);
exit(1);
}
@@ -624,8 +754,10 @@ static void machvirt_init(MachineState *machine)
create_fdt(vbi);
for (n = 0; n < smp_cpus; n++) {
- ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
+ ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpustr[0]);
+ CPUClass *cc = CPU_CLASS(oc);
Object *cpuobj;
+ Error *err = NULL;
if (!oc) {
fprintf(stderr, "Unable to find CPU definition\n");
@@ -633,6 +765,13 @@ static void machvirt_init(MachineState *machine)
}
cpuobj = object_new(object_class_get_name(oc));
+ /* Handle any CPU options specified by the user */
+ cc->parse_features(CPU(cpuobj), cpustr[1], &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ exit(1);
+ }
+
if (!vms->secure) {
object_property_set_bool(cpuobj, false, "has_el3", NULL);
}
@@ -652,6 +791,7 @@ static void machvirt_init(MachineState *machine)
object_property_set_bool(cpuobj, true, "realized", NULL);
}
+ g_strfreev(cpustr);
fdt_add_timer_nodes(vbi);
fdt_add_cpu_nodes(vbi);
fdt_add_psci_node(vbi);
@@ -663,12 +803,14 @@ static void machvirt_init(MachineState *machine)
create_flash(vbi);
- create_gic(vbi, pic);
+ gic_phandle = create_gic(vbi, pic);
create_uart(vbi, pic);
create_rtc(vbi, pic);
+ create_pcie(vbi, pic, gic_phandle);
+
/* Create mmio transports, so the user can create virtio backends
* (which will be automatically plugged in to the transports). If
* no backend is created the transport will just sit harmlessly idle.
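
Note: create_pcie_irq_map() above spreads the four legacy interrupt pins of each PCI slot across the four host SPIs using the usual swizzle. The small program below prints the resulting mapping, reusing the same formula and the base SPI (3) from the patch:

#include <stdio.h>

#define PCI_NUM_PINS    4
#define PCI_SLOT(devfn) ((devfn) >> 3)

int main(void)
{
    int first_irq = 3;                  /* VIRT_PCIE base SPI in the patch */
    int devfn, pin;

    for (devfn = 0; devfn <= 0x18; devfn += 0x8) {
        for (pin = 0; pin < 4; pin++) {
            int irq_nr = first_irq +
                         ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
            printf("slot %d INT%c -> SPI %d\n",
                   PCI_SLOT(devfn), 'A' + pin, irq_nr);
        }
    }
    return 0;
}
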
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 39c5d7103c..cd41478b08 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -16,7 +16,9 @@
#include "qemu/iov.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
+#include "hw/virtio/virtio-access.h"
#include "hw/virtio/dataplane/vring.h"
+#include "hw/virtio/dataplane/vring-accessors.h"
#include "sysemu/block-backend.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
@@ -75,7 +77,7 @@ static void complete_request_vring(VirtIOBlockReq *req, unsigned char status)
VirtIOBlockDataPlane *s = req->dev->dataplane;
stb_p(&req->in->status, status);
- vring_push(&req->dev->dataplane->vring, &req->elem,
+ vring_push(s->vdev, &req->dev->dataplane->vring, &req->elem,
req->qiov.size + sizeof(*req->in));
/* Suppress notification to guest by BH and its scheduled
@@ -96,9 +98,7 @@ static void handle_notify(EventNotifier *e)
event_notifier_test_and_clear(&s->host_notifier);
blk_io_plug(s->conf->conf.blk);
for (;;) {
- MultiReqBuffer mrb = {
- .num_writes = 0,
- };
+ MultiReqBuffer mrb = {};
int ret;
/* Disable guest->host notifies to avoid unnecessary vmexits */
@@ -120,7 +120,9 @@ static void handle_notify(EventNotifier *e)
virtio_blk_handle_request(req, &mrb);
}
- virtio_submit_multiwrite(s->conf->conf.blk, &mrb);
+ if (mrb.num_reqs) {
+ virtio_blk_submit_multireq(s->conf->conf.blk, &mrb);
+ }
if (likely(ret == -EAGAIN)) { /* vring emptied */
/* Re-enable guest->host notifies and stop processing the vring.
diff --git a/hw/block/onenand.c b/hw/block/onenand.c
index 348630d905..1b2c893756 100644
--- a/hw/block/onenand.c
+++ b/hw/block/onenand.c
@@ -346,15 +346,9 @@ static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn,
static inline int onenand_erase(OneNANDState *s, int sec, int num)
{
uint8_t *blankbuf, *tmpbuf;
+
blankbuf = g_malloc(512);
- if (!blankbuf) {
- return 1;
- }
tmpbuf = g_malloc(512);
- if (!tmpbuf) {
- g_free(blankbuf);
- return 1;
- }
memset(blankbuf, 0xff, 512);
for (; num > 0; num--, sec++) {
if (s->blk_cur) {
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 4032fcae27..1a8a176dcc 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -34,6 +34,7 @@ VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
req->dev = s;
req->qiov.size = 0;
req->next = NULL;
+ req->mr_next = NULL;
return req;
}
@@ -84,20 +85,32 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
static void virtio_blk_rw_complete(void *opaque, int ret)
{
- VirtIOBlockReq *req = opaque;
+ VirtIOBlockReq *next = opaque;
- trace_virtio_blk_rw_complete(req, ret);
+ while (next) {
+ VirtIOBlockReq *req = next;
+ next = req->mr_next;
+ trace_virtio_blk_rw_complete(req, ret);
- if (ret) {
- int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
- bool is_read = !(p & VIRTIO_BLK_T_OUT);
- if (virtio_blk_handle_rw_error(req, -ret, is_read))
- return;
- }
+ if (req->qiov.nalloc != -1) {
+ /* If nalloc is != -1 req->qiov is a local copy of the original
+ * external iovec. It was allocated in submit_requests
+ * to be able to merge requests. */
+ qemu_iovec_destroy(&req->qiov);
+ }
- virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
- block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
- virtio_blk_free_request(req);
+ if (ret) {
+ int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
+ bool is_read = !(p & VIRTIO_BLK_T_OUT);
+ if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
+ continue;
+ }
+ }
+
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+ block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
+ virtio_blk_free_request(req);
+ }
}
static void virtio_blk_flush_complete(void *opaque, int ret)
@@ -291,24 +304,127 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
}
}
-void virtio_submit_multiwrite(BlockBackend *blk, MultiReqBuffer *mrb)
+static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
+ int start, int num_reqs, int niov)
{
- int i, ret;
+ QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
+ int64_t sector_num = mrb->reqs[start]->sector_num;
+ int nb_sectors = mrb->reqs[start]->qiov.size / BDRV_SECTOR_SIZE;
+ bool is_write = mrb->is_write;
+
+ if (num_reqs > 1) {
+ int i;
+ struct iovec *tmp_iov = qiov->iov;
+ int tmp_niov = qiov->niov;
+
+ /* mrb->reqs[start]->qiov was initialized from external so we can't
+ * modify it here. We need to initialize it locally and then add the
+ * external iovecs. */
+ qemu_iovec_init(qiov, niov);
+
+ for (i = 0; i < tmp_niov; i++) {
+ qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
+ }
- if (!mrb->num_writes) {
+ for (i = start + 1; i < start + num_reqs; i++) {
+ qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
+ mrb->reqs[i]->qiov.size);
+ mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
+ nb_sectors += mrb->reqs[i]->qiov.size / BDRV_SECTOR_SIZE;
+ }
+ assert(nb_sectors == qiov->size / BDRV_SECTOR_SIZE);
+
+ trace_virtio_blk_submit_multireq(mrb, start, num_reqs, sector_num,
+ nb_sectors, is_write);
+ block_acct_merge_done(blk_get_stats(blk),
+ is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
+ num_reqs - 1);
+ }
+
+ if (is_write) {
+ blk_aio_writev(blk, sector_num, qiov, nb_sectors,
+ virtio_blk_rw_complete, mrb->reqs[start]);
+ } else {
+ blk_aio_readv(blk, sector_num, qiov, nb_sectors,
+ virtio_blk_rw_complete, mrb->reqs[start]);
+ }
+}
+
+static int multireq_compare(const void *a, const void *b)
+{
+ const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
+ *req2 = *(VirtIOBlockReq **)b;
+
+ /*
+ * Note that we can't simply subtract sector_num1 from sector_num2
+ * here as that could overflow the return value.
+ */
+ if (req1->sector_num > req2->sector_num) {
+ return 1;
+ } else if (req1->sector_num < req2->sector_num) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
+{
+ int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
+ int max_xfer_len = 0;
+ int64_t sector_num = 0;
+
+ if (mrb->num_reqs == 1) {
+ submit_requests(blk, mrb, 0, 1, -1);
+ mrb->num_reqs = 0;
return;
}
- ret = blk_aio_multiwrite(blk, mrb->blkreq, mrb->num_writes);
- if (ret != 0) {
- for (i = 0; i < mrb->num_writes; i++) {
- if (mrb->blkreq[i].error) {
- virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO);
+ max_xfer_len = blk_get_max_transfer_length(mrb->reqs[0]->dev->blk);
+ max_xfer_len = MIN_NON_ZERO(max_xfer_len, BDRV_REQUEST_MAX_SECTORS);
+
+ qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
+ &multireq_compare);
+
+ for (i = 0; i < mrb->num_reqs; i++) {
+ VirtIOBlockReq *req = mrb->reqs[i];
+ if (num_reqs > 0) {
+ bool merge = true;
+
+ /* merge would exceed maximum number of IOVs */
+ if (niov + req->qiov.niov > IOV_MAX) {
+ merge = false;
+ }
+
+ /* merge would exceed maximum transfer length of backend device */
+ if (req->qiov.size / BDRV_SECTOR_SIZE + nb_sectors > max_xfer_len) {
+ merge = false;
+ }
+
+ /* requests are not sequential */
+ if (sector_num + nb_sectors != req->sector_num) {
+ merge = false;
+ }
+
+ if (!merge) {
+ submit_requests(blk, mrb, start, num_reqs, niov);
+ num_reqs = 0;
}
}
+
+ if (num_reqs == 0) {
+ sector_num = req->sector_num;
+ nb_sectors = niov = 0;
+ start = i;
+ }
+
+ nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
+ niov += req->qiov.niov;
+ num_reqs++;
}
- mrb->num_writes = 0;
+ submit_requests(blk, mrb, start, num_reqs, niov);
+ mrb->num_reqs = 0;
}
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
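
Note: each iteration of the loop above decides whether the next sorted request may join the current batch. A stripped-down sketch of the same three checks, with hypothetical limits standing in for IOV_MAX and the backend's maximum transfer length:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IOV_LIMIT      1024
#define MAX_XFER_SECT  4096

struct req { int64_t sector; int nb_sectors; int niov; };

static bool can_merge(const struct req *batch_start, int batch_sectors,
                      int batch_niov, const struct req *next)
{
    if (batch_niov + next->niov > IOV_LIMIT) {
        return false;                         /* too many iovec entries */
    }
    if (batch_sectors + next->nb_sectors > MAX_XFER_SECT) {
        return false;                         /* backend transfer limit */
    }
    if (batch_start->sector + batch_sectors != next->sector) {
        return false;                         /* not sequential */
    }
    return true;
}

int main(void)
{
    struct req a = { 0, 8, 1 }, b = { 8, 8, 1 }, c = { 64, 8, 1 };

    printf("a+b mergeable: %d\n", can_merge(&a, a.nb_sectors, a.niov, &b));
    printf("a+c mergeable: %d\n", can_merge(&a, a.nb_sectors, a.niov, &c));
    return 0;
}
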
@@ -319,7 +435,9 @@ static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
/*
* Make sure all outstanding writes are posted to the backing device.
*/
- virtio_submit_multiwrite(req->dev->blk, mrb);
+ if (mrb->is_write && mrb->num_reqs > 0) {
+ virtio_blk_submit_multireq(req->dev->blk, mrb);
+ }
blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
}
@@ -329,6 +447,9 @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
uint64_t total_sectors;
+ if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
+ return false;
+ }
if (sector & dev->sector_mask) {
return false;
}
@@ -342,60 +463,6 @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
return true;
}
-static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
-{
- BlockRequest *blkreq;
- uint64_t sector;
-
- sector = virtio_ldq_p(VIRTIO_DEVICE(req->dev), &req->out.sector);
-
- trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);
-
- if (!virtio_blk_sect_range_ok(req->dev, sector, req->qiov.size)) {
- virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
- virtio_blk_free_request(req);
- return;
- }
-
- block_acct_start(blk_get_stats(req->dev->blk), &req->acct, req->qiov.size,
- BLOCK_ACCT_WRITE);
-
- if (mrb->num_writes == 32) {
- virtio_submit_multiwrite(req->dev->blk, mrb);
- }
-
- blkreq = &mrb->blkreq[mrb->num_writes];
- blkreq->sector = sector;
- blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
- blkreq->qiov = &req->qiov;
- blkreq->cb = virtio_blk_rw_complete;
- blkreq->opaque = req;
- blkreq->error = 0;
-
- mrb->num_writes++;
-}
-
-static void virtio_blk_handle_read(VirtIOBlockReq *req)
-{
- uint64_t sector;
-
- sector = virtio_ldq_p(VIRTIO_DEVICE(req->dev), &req->out.sector);
-
- trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);
-
- if (!virtio_blk_sect_range_ok(req->dev, sector, req->qiov.size)) {
- virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
- virtio_blk_free_request(req);
- return;
- }
-
- block_acct_start(blk_get_stats(req->dev->blk), &req->acct, req->qiov.size,
- BLOCK_ACCT_READ);
- blk_aio_readv(req->dev->blk, sector, &req->qiov,
- req->qiov.size / BDRV_SECTOR_SIZE,
- virtio_blk_rw_complete, req);
-}
-
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
uint32_t type;
@@ -430,11 +497,58 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
type = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
- if (type & VIRTIO_BLK_T_FLUSH) {
+ /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
+ * is an optional flag. Although a guest should not send this flag if it
+ * was not negotiated, we ignored it in the past. So keep ignoring it. */
+ switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
+ case VIRTIO_BLK_T_IN:
+ {
+ bool is_write = type & VIRTIO_BLK_T_OUT;
+ req->sector_num = virtio_ldq_p(VIRTIO_DEVICE(req->dev),
+ &req->out.sector);
+
+ if (is_write) {
+ qemu_iovec_init_external(&req->qiov, iov, out_num);
+ trace_virtio_blk_handle_write(req, req->sector_num,
+ req->qiov.size / BDRV_SECTOR_SIZE);
+ } else {
+ qemu_iovec_init_external(&req->qiov, in_iov, in_num);
+ trace_virtio_blk_handle_read(req, req->sector_num,
+ req->qiov.size / BDRV_SECTOR_SIZE);
+ }
+
+ if (!virtio_blk_sect_range_ok(req->dev, req->sector_num,
+ req->qiov.size)) {
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+ virtio_blk_free_request(req);
+ return;
+ }
+
+ block_acct_start(blk_get_stats(req->dev->blk),
+ &req->acct, req->qiov.size,
+ is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
+
+ /* merge would exceed maximum number of requests or IO direction
+ * changes */
+ if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
+ is_write != mrb->is_write ||
+ !req->dev->conf.request_merging)) {
+ virtio_blk_submit_multireq(req->dev->blk, mrb);
+ }
+
+ assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
+ mrb->reqs[mrb->num_reqs++] = req;
+ mrb->is_write = is_write;
+ break;
+ }
+ case VIRTIO_BLK_T_FLUSH:
virtio_blk_handle_flush(req, mrb);
- } else if (type & VIRTIO_BLK_T_SCSI_CMD) {
+ break;
+ case VIRTIO_BLK_T_SCSI_CMD:
virtio_blk_handle_scsi(req);
- } else if (type & VIRTIO_BLK_T_GET_ID) {
+ break;
+ case VIRTIO_BLK_T_GET_ID:
+ {
VirtIOBlock *s = req->dev;
/*
@@ -448,14 +562,9 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
iov_from_buf(in_iov, in_num, 0, serial, size);
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
virtio_blk_free_request(req);
- } else if (type & VIRTIO_BLK_T_OUT) {
- qemu_iovec_init_external(&req->qiov, iov, out_num);
- virtio_blk_handle_write(req, mrb);
- } else if (type == VIRTIO_BLK_T_IN || type == VIRTIO_BLK_T_BARRIER) {
- /* VIRTIO_BLK_T_IN is 0, so we can't just & it. */
- qemu_iovec_init_external(&req->qiov, in_iov, in_num);
- virtio_blk_handle_read(req);
- } else {
+ break;
+ }
+ default:
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
virtio_blk_free_request(req);
}
@@ -465,9 +574,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
VirtIOBlockReq *req;
- MultiReqBuffer mrb = {
- .num_writes = 0,
- };
+ MultiReqBuffer mrb = {};
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* dataplane here instead of waiting for .set_status().
@@ -481,7 +588,9 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
virtio_blk_handle_request(req, &mrb);
}
- virtio_submit_multiwrite(s->blk, &mrb);
+ if (mrb.num_reqs) {
+ virtio_blk_submit_multireq(s->blk, &mrb);
+ }
/*
* FIXME: Want to check for completions before returning to guest mode,
@@ -494,9 +603,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
{
VirtIOBlock *s = opaque;
VirtIOBlockReq *req = s->rq;
- MultiReqBuffer mrb = {
- .num_writes = 0,
- };
+ MultiReqBuffer mrb = {};
qemu_bh_delete(s->bh);
s->bh = NULL;
@@ -509,7 +616,9 @@ static void virtio_blk_dma_restart_bh(void *opaque)
req = next;
}
- virtio_submit_multiwrite(s->blk, &mrb);
+ if (mrb.num_reqs) {
+ virtio_blk_submit_multireq(s->blk, &mrb);
+ }
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
@@ -842,6 +951,8 @@ static Property virtio_blk_properties[] = {
#ifdef __linux__
DEFINE_PROP_BIT("scsi", VirtIOBlock, conf.scsi, 0, true),
#endif
+ DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
+ true),
DEFINE_PROP_BIT("x-data-plane", VirtIOBlock, conf.data_plane, 0, false),
DEFINE_PROP_END_OF_LIST(),
};
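For reference, virtio_blk_submit_multireq() above qsorts the queued requests by start sector and then walks them, flushing the current batch whenever appending the next request would exceed IOV_MAX, exceed the backend's maximum transfer length, or break strict sector adjacency; the comparator returns -1/0/1 instead of subtracting because the 64-bit sector difference could overflow its int return value. A minimal, self-contained sketch of those rules (plain C with made-up stand-in types, not the QEMU structures):

    /* Minimal sketch of the batching rules; simplified stand-in types,
     * not the actual QEMU structures. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define SKETCH_IOV_MAX 1024          /* stand-in for the system IOV_MAX */

    struct sketch_req {
        int64_t sector;                  /* first sector */
        int nb_sectors;                  /* length in sectors */
        int niov;                        /* iovec entries carried */
    };

    /* Return -1/0/1 rather than subtracting: the 64-bit sector difference
     * could overflow an int return value. */
    static int req_compare(const void *a, const void *b)
    {
        const struct sketch_req *r1 = a, *r2 = b;
        return (r1->sector > r2->sector) - (r1->sector < r2->sector);
    }

    /* A sorted neighbour may join the current batch only while all three
     * limits hold: iovec count, backend transfer length, strict adjacency. */
    static bool can_merge(const struct sketch_req *next, int64_t end_sector,
                          int niov, int nb_sectors, int max_xfer_sectors)
    {
        return niov + next->niov <= SKETCH_IOV_MAX &&
               nb_sectors + next->nb_sectors <= max_xfer_sectors &&
               end_sector == next->sector;
    }

    int main(void)
    {
        struct sketch_req reqs[] = { { 8, 8, 1 }, { 0, 8, 1 }, { 16, 8, 1 } };

        qsort(reqs, 3, sizeof(reqs[0]), req_compare);
        /* After sorting (0, 8, 16) all three are adjacent and small enough,
         * so they would be submitted as one 24-sector request. */
        return can_merge(&reqs[1], 0 + 8, 1, 8, 4096) ? 0 : 1;
    }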
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 21842a01e7..267d8a8c70 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -40,6 +40,8 @@
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qstring.h"
/* ------------------------------------------------------------- */
@@ -897,30 +899,23 @@ static int blk_connect(struct XenDevice *xendev)
blkdev->dinfo = drive_get(IF_XEN, 0, index);
if (!blkdev->dinfo) {
Error *local_err = NULL;
- BlockBackend *blk;
- BlockDriver *drv;
- BlockDriverState *bs;
+ QDict *options = NULL;
- /* setup via xenbus -> create new block driver instance */
- xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
- blk = blk_new_with_bs(blkdev->dev, NULL);
- if (!blk) {
- return -1;
+ if (strcmp(blkdev->fileproto, "<unset>")) {
+ options = qdict_new();
+ qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
}
- blkdev->blk = blk;
- bs = blk_bs(blk);
- drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
- if (bdrv_open(&bs, blkdev->filename, NULL, NULL, qflags,
- drv, &local_err) != 0) {
+ /* setup via xenbus -> create new block driver instance */
+ xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
+ blkdev->blk = blk_new_open(blkdev->dev, blkdev->filename, NULL, options,
+ qflags, &local_err);
+ if (!blkdev->blk) {
xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
error_get_pretty(local_err));
error_free(local_err);
- blk_unref(blk);
- blkdev->blk = NULL;
return -1;
}
- assert(bs == blk_bs(blk));
} else {
/* setup via qemu cmdline -> already setup for us */
xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
diff --git a/hw/char/serial.c b/hw/char/serial.c
index bd25c03bea..04918979d8 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -658,7 +658,7 @@ static bool serial_thr_ipending_needed(void *opaque)
}
}
-const VMStateDescription vmstate_serial_thr_ipending = {
+static const VMStateDescription vmstate_serial_thr_ipending = {
.name = "serial/thr_ipending",
.version_id = 1,
.minimum_version_id = 1,
@@ -674,7 +674,7 @@ static bool serial_tsr_needed(void *opaque)
return s->tsr_retry != 0;
}
-const VMStateDescription vmstate_serial_tsr = {
+static const VMStateDescription vmstate_serial_tsr = {
.name = "serial/tsr",
.version_id = 1,
.minimum_version_id = 1,
@@ -693,7 +693,7 @@ static bool serial_recv_fifo_needed(void *opaque)
}
-const VMStateDescription vmstate_serial_recv_fifo = {
+static const VMStateDescription vmstate_serial_recv_fifo = {
.name = "serial/recv_fifo",
.version_id = 1,
.minimum_version_id = 1,
@@ -709,7 +709,7 @@ static bool serial_xmit_fifo_needed(void *opaque)
return !fifo8_is_empty(&s->xmit_fifo);
}
-const VMStateDescription vmstate_serial_xmit_fifo = {
+static const VMStateDescription vmstate_serial_xmit_fifo = {
.name = "serial/xmit_fifo",
.version_id = 1,
.minimum_version_id = 1,
@@ -725,7 +725,7 @@ static bool serial_fifo_timeout_timer_needed(void *opaque)
return timer_pending(s->fifo_timeout_timer);
}
-const VMStateDescription vmstate_serial_fifo_timeout_timer = {
+static const VMStateDescription vmstate_serial_fifo_timeout_timer = {
.name = "serial/fifo_timeout_timer",
.version_id = 1,
.minimum_version_id = 1,
@@ -741,7 +741,7 @@ static bool serial_timeout_ipending_needed(void *opaque)
return s->timeout_ipending != 0;
}
-const VMStateDescription vmstate_serial_timeout_ipending = {
+static const VMStateDescription vmstate_serial_timeout_ipending = {
.name = "serial/timeout_ipending",
.version_id = 1,
.minimum_version_id = 1,
@@ -757,7 +757,7 @@ static bool serial_poll_needed(void *opaque)
return s->poll_msl >= 0;
}
-const VMStateDescription vmstate_serial_poll = {
+static const VMStateDescription vmstate_serial_poll = {
.name = "serial/poll",
.version_id = 1,
.minimum_version_id = 1,
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index 37a6f442fa..47fbb34bdd 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -26,7 +26,7 @@
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-access.h"
-struct VirtIOSerialDevices {
+static struct VirtIOSerialDevices {
QLIST_HEAD(, VirtIOSerial) devices;
} vserdevices;
diff --git a/hw/core/fw-path-provider.c b/hw/core/fw-path-provider.c
index 1290c3e600..7442d322d5 100644
--- a/hw/core/fw-path-provider.c
+++ b/hw/core/fw-path-provider.c
@@ -1,5 +1,5 @@
/*
- * Firmware patch provider class and helpers.
+ * Firmware path provider class and helpers.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 3a53f20392..ec923c8c4b 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -2907,7 +2907,7 @@ static void cirrus_init_common(CirrusVGAState *s, Object *owner,
bank, 1);
}
memory_region_add_subregion_overlap(system_memory,
- isa_mem_base + 0x000a0000,
+ 0x000a0000,
&s->low_mem_container,
1);
memory_region_set_coalescing(&s->low_mem);
diff --git a/hw/display/vga-isa.c b/hw/display/vga-isa.c
index 2b480bd44d..7f3c98941b 100644
--- a/hw/display/vga-isa.c
+++ b/hw/display/vga-isa.c
@@ -64,7 +64,7 @@ static void vga_isa_realizefn(DeviceState *dev, Error **errp)
isa_register_portio_list(isadev, 0x1ce, vbe_ports, s, "vbe");
}
memory_region_add_subregion_overlap(isa_address_space(isadev),
- isa_mem_base + 0x000a0000,
+ 0x000a0000,
vga_io_memory, 1);
memory_region_set_coalescing(vga_io_memory);
s->con = graphic_console_init(DEVICE(dev), 0, s->hw_ops, s);
diff --git a/hw/display/vga.c b/hw/display/vga.c
index ffcfce38a4..c8c49abc6e 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -177,7 +177,6 @@ static void vga_update_memory_access(VGACommonState *s)
size = 0x8000;
break;
}
- base += isa_mem_base;
memory_region_init_alias(&s->chain4_alias, memory_region_owner(&s->vram),
"vga.chain4", &s->vram, offset, size);
memory_region_add_subregion_overlap(s->legacy_address_space, base,
@@ -2032,7 +2031,7 @@ static bool vga_endian_state_needed(void *opaque)
return s->default_endian_fb != s->big_endian_fb;
}
-const VMStateDescription vmstate_vga_endian = {
+static const VMStateDescription vmstate_vga_endian = {
.name = "vga.endian",
.version_id = 1,
.minimum_version_id = 1,
@@ -2218,7 +2217,7 @@ void vga_init(VGACommonState *s, Object *obj, MemoryRegion *address_space,
vga_io_memory = vga_init_io(s, obj, &vga_ports, &vbe_ports);
memory_region_add_subregion_overlap(address_space,
- isa_mem_base + 0x000a0000,
+ 0x000a0000,
vga_io_memory,
1);
memory_region_set_coalescing(vga_io_memory);
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 8a61e959a6..5e324ef62d 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -45,6 +45,8 @@
#include <xen/io/kbdif.h>
#include <xen/io/protocols.h>
+#include "trace.h"
+
#ifndef BTN_LEFT
#define BTN_LEFT 0x110 /* from <linux/input.h> */
#endif
@@ -324,6 +326,8 @@ static void xenfb_mouse_event(void *opaque,
int dh = surface_height(surface);
int i;
+ trace_xenfb_mouse_event(opaque, dx, dy, dz, button_state,
+ xenfb->abs_pointer_wanted);
if (xenfb->abs_pointer_wanted)
xenfb_send_position(xenfb,
dx * (dw - 1) / 0x7fff,
@@ -380,6 +384,7 @@ static void input_connected(struct XenDevice *xendev)
if (in->qmouse) {
qemu_remove_mouse_event_handler(in->qmouse);
}
+ trace_xenfb_input_connected(xendev, in->abs_pointer_wanted);
in->qmouse = qemu_add_mouse_event_handler(xenfb_mouse_event, in,
in->abs_pointer_wanted,
"Xen PVFB Mouse");
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 38b42b05f8..de75cf0e87 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -208,7 +208,7 @@ static void pc_init1(MachineState *machine,
} else {
pci_bus = NULL;
i440fx_state = NULL;
- isa_bus = isa_bus_new(NULL, system_io);
+ isa_bus = isa_bus_new(NULL, get_system_memory(), system_io);
no_hpet = 1;
}
isa_bus_irqs(isa_bus, gsi);
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index a71e6e014f..1bf8b34528 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -394,6 +394,23 @@ static void ide_atapi_cmd_read(IDEState *s, int lba, int nb_sectors,
}
}
+
+/* Called by *_restart_bh when the transfer function points
+ * to ide_atapi_cmd
+ */
+void ide_atapi_dma_restart(IDEState *s)
+{
+ /*
+ * I'm not sure we have enough stored to restart the command
+ * safely, so give the guest an error it should recover from.
+ * I'm assuming most guests will try to recover from something
+ * listed as a medium error on a CD; it seems to work on Linux.
+ * This would be more of a problem if we did any other type of
+ * DMA operation.
+ */
+ ide_atapi_cmd_error(s, MEDIUM_ERROR, ASC_NO_SEEK_COMPLETE);
+}
+
static inline uint8_t ide_atapi_set_profile(uint8_t *buf, uint8_t *index,
uint16_t profile)
{
diff --git a/hw/ide/core.c b/hw/ide/core.c
index d4af5e2eb1..ac3f015a8d 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -2417,6 +2417,7 @@ static int ide_drive_pio_post_load(void *opaque, int version_id)
s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
s->data_end = s->data_ptr + s->cur_io_buffer_len;
+ s->atapi_dma = s->feature & 1; /* as per cmd_packet */
return 0;
}
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index c998003bf3..ee9a57f039 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -289,6 +289,7 @@ typedef struct IDEDMAOps IDEDMAOps;
#define ATAPI_INT_REASON_TAG 0xf8
/* same constants as bochs */
+#define ASC_NO_SEEK_COMPLETE 0x02
#define ASC_ILLEGAL_OPCODE 0x20
#define ASC_LOGICAL_BLOCK_OOR 0x21
#define ASC_INV_FIELD_IN_CMD_PACKET 0x24
@@ -530,6 +531,7 @@ void ide_dma_error(IDEState *s);
void ide_atapi_cmd_ok(IDEState *s);
void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc);
+void ide_atapi_dma_restart(IDEState *s);
void ide_atapi_io_error(IDEState *s, int ret);
void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val);
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index bee5ad39fe..e3f2054a90 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -235,6 +235,17 @@ static void bmdma_restart_bh(void *opaque)
}
} else if (error_status & IDE_RETRY_FLUSH) {
ide_flush_cache(bmdma_active_if(bm));
+ } else {
+ IDEState *s = bmdma_active_if(bm);
+
+ /*
+ * We've not got any bits to tell us about ATAPI - but
+ * we do have the end_transfer_func that tells us what
+ * we're trying to do.
+ */
+ if (s->end_transfer_func == ide_atapi_cmd) {
+ ide_atapi_dma_restart(s);
+ }
}
}
diff --git a/hw/input/ps2.c b/hw/input/ps2.c
index a466e250a4..4baeea2b56 100644
--- a/hw/input/ps2.c
+++ b/hw/input/ps2.c
@@ -27,6 +27,8 @@
#include "ui/input.h"
#include "sysemu/sysemu.h"
+#include "trace.h"
+
/* debug PC keyboard */
//#define DEBUG_KBD
@@ -158,6 +160,7 @@ static void ps2_put_keycode(void *opaque, int keycode)
{
PS2KbdState *s = opaque;
+ trace_ps2_put_keycode(opaque, keycode);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
/* XXX: add support for scancode set 1 */
if (!s->translate && keycode < 0xe0 && s->scancode_set > 1) {
@@ -194,6 +197,7 @@ uint32_t ps2_read_data(void *opaque)
PS2Queue *q;
int val, index;
+ trace_ps2_read_data(opaque);
q = &s->queue;
if (q->count == 0) {
/* NOTE: if no data left, we return the last keyboard one
@@ -218,12 +222,14 @@ uint32_t ps2_read_data(void *opaque)
static void ps2_set_ledstate(PS2KbdState *s, int ledstate)
{
+ trace_ps2_set_ledstate(s, ledstate);
s->ledstate = ledstate;
kbd_put_ledstate(ledstate);
}
static void ps2_reset_keyboard(PS2KbdState *s)
{
+ trace_ps2_reset_keyboard(s);
s->scan_enabled = 1;
s->scancode_set = 2;
ps2_set_ledstate(s, 0);
@@ -233,6 +239,7 @@ void ps2_write_keyboard(void *opaque, int val)
{
PS2KbdState *s = (PS2KbdState *)opaque;
+ trace_ps2_write_keyboard(opaque, val);
switch(s->common.write_cmd) {
default:
case -1:
@@ -319,6 +326,7 @@ void ps2_write_keyboard(void *opaque, int val)
void ps2_keyboard_set_translation(void *opaque, int mode)
{
PS2KbdState *s = (PS2KbdState *)opaque;
+ trace_ps2_keyboard_set_translation(opaque, mode);
s->translate = mode;
}
@@ -364,6 +372,7 @@ static void ps2_mouse_send_packet(PS2MouseState *s)
break;
}
+ trace_ps2_mouse_send_packet(s, dx1, dy1, dz1, b);
/* update deltas */
s->mouse_dx -= dx1;
s->mouse_dy -= dy1;
@@ -433,6 +442,7 @@ static void ps2_mouse_sync(DeviceState *dev)
void ps2_mouse_fake_event(void *opaque)
{
PS2MouseState *s = opaque;
+ trace_ps2_mouse_fake_event(opaque);
s->mouse_dx++;
ps2_mouse_sync(opaque);
}
@@ -440,6 +450,8 @@ void ps2_mouse_fake_event(void *opaque)
void ps2_write_mouse(void *opaque, int val)
{
PS2MouseState *s = (PS2MouseState *)opaque;
+
+ trace_ps2_write_mouse(opaque, val);
#ifdef DEBUG_MOUSE
printf("kbd: write mouse 0x%02x\n", val);
#endif
@@ -606,6 +618,7 @@ static void ps2_kbd_reset(void *opaque)
{
PS2KbdState *s = (PS2KbdState *) opaque;
+ trace_ps2_kbd_reset(opaque);
ps2_common_reset(&s->common);
s->scan_enabled = 0;
s->translate = 0;
@@ -616,6 +629,7 @@ static void ps2_mouse_reset(void *opaque)
{
PS2MouseState *s = (PS2MouseState *) opaque;
+ trace_ps2_mouse_reset(opaque);
ps2_common_reset(&s->common);
s->mouse_status = 0;
s->mouse_resolution = 0;
@@ -763,6 +777,7 @@ void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg)
{
PS2KbdState *s = (PS2KbdState *)g_malloc0(sizeof(PS2KbdState));
+ trace_ps2_kbd_init(s);
s->common.update_irq = update_irq;
s->common.update_arg = update_arg;
s->scancode_set = 2;
@@ -784,6 +799,7 @@ void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg)
{
PS2MouseState *s = (PS2MouseState *)g_malloc0(sizeof(PS2MouseState));
+ trace_ps2_mouse_init(s);
s->common.update_irq = update_irq;
s->common.update_arg = update_arg;
vmstate_register(NULL, 0, &vmstate_ps2_mouse, s);
diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c
index a7d9aa6da1..0dc440df5c 100644
--- a/hw/isa/i82378.c
+++ b/hw/isa/i82378.c
@@ -75,7 +75,8 @@ static int i82378_initfn(PCIDevice *pci)
pci_config_set_interrupt_pin(pci_conf, 1); /* interrupt pin 0 */
- isabus = isa_bus_new(dev, pci_address_space_io(pci));
+ isabus = isa_bus_new(dev, get_system_memory(),
+ pci_address_space_io(pci));
/* This device has:
2 82C59 (irq)
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index cc85e538b1..825aa627df 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -21,10 +21,8 @@
#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
#include "hw/isa/isa.h"
-#include "exec/address-spaces.h"
static ISABus *isabus;
-hwaddr isa_mem_base = 0;
static void isabus_dev_print(Monitor *mon, DeviceState *dev, int indent);
static char *isabus_get_fw_dev_path(DeviceState *dev);
@@ -44,7 +42,8 @@ static const TypeInfo isa_bus_info = {
.class_init = isa_bus_class_init,
};
-ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space_io)
+ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space,
+ MemoryRegion *address_space_io)
{
if (isabus) {
fprintf(stderr, "Can't create a second ISA bus\n");
@@ -56,6 +55,7 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space_io)
}
isabus = ISA_BUS(qbus_create(TYPE_ISA_BUS, dev, NULL));
+ isabus->address_space = address_space;
isabus->address_space_io = address_space_io;
return isabus;
}
@@ -250,7 +250,11 @@ static char *isabus_get_fw_dev_path(DeviceState *dev)
MemoryRegion *isa_address_space(ISADevice *dev)
{
- return get_system_memory();
+ if (dev) {
+ return isa_bus_from_device(dev)->address_space;
+ }
+
+ return isabus->address_space;
}
MemoryRegion *isa_address_space_io(ISADevice *dev)
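For reference, isa_bus_new() now takes the ISA memory space in addition to the I/O space, and isa_address_space() resolves it per bus rather than always returning system memory. A fragment showing how a board wires this up, mirroring the mips_jazz hunk further down (addresses and sizes are that board's, used here only as an example):

    /* Fragment only - mirrors the mips_jazz hunk below, not standalone code.
     * The ISA bus now gets its own memory container alongside its I/O space. */
    MemoryRegion *isa_mem = g_new(MemoryRegion, 1);
    MemoryRegion *isa_io = g_new(MemoryRegion, 1);

    memory_region_init(isa_io, NULL, "isa-io", 0x00010000);    /* 64 KiB */
    memory_region_init(isa_mem, NULL, "isa-mem", 0x01000000);  /* 16 MiB */
    memory_region_add_subregion(get_system_memory(), 0x90000000, isa_io);
    memory_region_add_subregion(get_system_memory(), 0x91000000, isa_mem);

    isa_bus = isa_bus_new(NULL, isa_mem, isa_io);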
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index 530b074551..231de74414 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -575,7 +575,7 @@ static int ich9_lpc_init(PCIDevice *d)
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
ISABus *isa_bus;
- isa_bus = isa_bus_new(&d->qdev, get_system_io());
+ isa_bus = isa_bus_new(DEVICE(d), get_system_memory(), get_system_io());
pci_set_long(d->wmask + ICH9_LPC_PMBASE,
ICH9_LPC_PMBASE_BASE_ADDRESS_MASK);
diff --git a/hw/isa/piix4.c b/hw/isa/piix4.c
index 1aa17d7cf6..a9916df20a 100644
--- a/hw/isa/piix4.c
+++ b/hw/isa/piix4.c
@@ -86,7 +86,8 @@ static int piix4_initfn(PCIDevice *dev)
{
PIIX4State *d = DO_UPCAST(PIIX4State, dev, dev);
- isa_bus_new(&d->dev.qdev, pci_address_space_io(dev));
+ isa_bus_new(DEVICE(d), pci_address_space(dev),
+ pci_address_space_io(dev));
piix4_dev = &d->dev;
qemu_register_reset(piix4_reset, d);
return 0;
diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c
index 17510ce528..b223526bde 100644
--- a/hw/isa/vt82c686.c
+++ b/hw/isa/vt82c686.c
@@ -429,7 +429,8 @@ static int vt82c686b_initfn(PCIDevice *d)
uint8_t *wmask;
int i;
- isa_bus = isa_bus_new(&d->qdev, pci_address_space_io(d));
+ isa_bus = isa_bus_new(DEVICE(d), get_system_memory(),
+ pci_address_space_io(d));
pci_conf = d->config;
pci_config_set_prog_interface(pci_conf, 0x0);
diff --git a/hw/mips/gt64xxx_pci.c b/hw/mips/gt64xxx_pci.c
index 1f2fe5fab9..10fcca33f8 100644
--- a/hw/mips/gt64xxx_pci.c
+++ b/hw/mips/gt64xxx_pci.c
@@ -239,7 +239,11 @@ typedef struct GT64120State {
uint32_t regs[GT_REGS];
PCI_MAPPING_ENTRY(PCI0IO);
+ PCI_MAPPING_ENTRY(PCI0M0);
+ PCI_MAPPING_ENTRY(PCI0M1);
PCI_MAPPING_ENTRY(ISD);
+ MemoryRegion pci0_mem;
+ AddressSpace pci0_mem_as;
} GT64120State;
/* Adjust range to avoid touching space which isn't mappable via PCI */
@@ -290,25 +294,63 @@ static void gt64120_isd_mapping(GT64120State *s)
static void gt64120_pci_mapping(GT64120State *s)
{
- /* Update IO mapping */
- if ((s->regs[GT_PCI0IOLD] & 0x7f) <= s->regs[GT_PCI0IOHD])
- {
- /* Unmap old IO address */
- if (s->PCI0IO_length)
- {
- memory_region_del_subregion(get_system_memory(), &s->PCI0IO_mem);
- object_unparent(OBJECT(&s->PCI0IO_mem));
- }
- /* Map new IO address */
- s->PCI0IO_start = s->regs[GT_PCI0IOLD] << 21;
- s->PCI0IO_length = ((s->regs[GT_PCI0IOHD] + 1) - (s->regs[GT_PCI0IOLD] & 0x7f)) << 21;
- isa_mem_base = s->PCI0IO_start;
- if (s->PCI0IO_length) {
- memory_region_init_alias(&s->PCI0IO_mem, OBJECT(s), "isa_mmio",
- get_system_io(), 0, s->PCI0IO_length);
- memory_region_add_subregion(get_system_memory(), s->PCI0IO_start,
- &s->PCI0IO_mem);
- }
+ /* Update PCI0IO mapping */
+ if ((s->regs[GT_PCI0IOLD] & 0x7f) <= s->regs[GT_PCI0IOHD]) {
+ /* Unmap old IO address */
+ if (s->PCI0IO_length) {
+ memory_region_del_subregion(get_system_memory(), &s->PCI0IO_mem);
+ object_unparent(OBJECT(&s->PCI0IO_mem));
+ }
+ /* Map new IO address */
+ s->PCI0IO_start = s->regs[GT_PCI0IOLD] << 21;
+ s->PCI0IO_length = ((s->regs[GT_PCI0IOHD] + 1) -
+ (s->regs[GT_PCI0IOLD] & 0x7f)) << 21;
+ if (s->PCI0IO_length) {
+ memory_region_init_alias(&s->PCI0IO_mem, OBJECT(s), "pci0-io",
+ get_system_io(), 0, s->PCI0IO_length);
+ memory_region_add_subregion(get_system_memory(), s->PCI0IO_start,
+ &s->PCI0IO_mem);
+ }
+ }
+
+ /* Update PCI0M0 mapping */
+ if ((s->regs[GT_PCI0M0LD] & 0x7f) <= s->regs[GT_PCI0M0HD]) {
+ /* Unmap old MEM address */
+ if (s->PCI0M0_length) {
+ memory_region_del_subregion(get_system_memory(), &s->PCI0M0_mem);
+ object_unparent(OBJECT(&s->PCI0M0_mem));
+ }
+ /* Map new mem address */
+ s->PCI0M0_start = s->regs[GT_PCI0M0LD] << 21;
+ s->PCI0M0_length = ((s->regs[GT_PCI0M0HD] + 1) -
+ (s->regs[GT_PCI0M0LD] & 0x7f)) << 21;
+ if (s->PCI0M0_length) {
+ memory_region_init_alias(&s->PCI0M0_mem, OBJECT(s), "pci0-mem0",
+ &s->pci0_mem, s->PCI0M0_start,
+ s->PCI0M0_length);
+ memory_region_add_subregion(get_system_memory(), s->PCI0M0_start,
+ &s->PCI0M0_mem);
+ }
+ }
+
+ /* Update PCI0M1 mapping */
+ if ((s->regs[GT_PCI0M1LD] & 0x7f) <= s->regs[GT_PCI0M1HD]) {
+ /* Unmap old MEM address */
+ if (s->PCI0M1_length) {
+ memory_region_del_subregion(get_system_memory(), &s->PCI0M1_mem);
+ object_unparent(OBJECT(&s->PCI0M1_mem));
+ }
+ /* Map new mem address */
+ s->PCI0M1_start = s->regs[GT_PCI0M1LD] << 21;
+ s->PCI0M1_length = ((s->regs[GT_PCI0M1HD] + 1) -
+ (s->regs[GT_PCI0M1LD] & 0x7f)) << 21;
+ if (s->PCI0M1_length) {
+ memory_region_init_alias(&s->PCI0M1_mem, OBJECT(s), "pci0-mem1",
+ &s->pci0_mem, s->PCI0M1_start,
+ s->PCI0M1_length);
+ memory_region_add_subregion(get_system_memory(), s->PCI0M1_start,
+ &s->PCI0M1_mem);
+ }
}
}
@@ -363,10 +405,12 @@ static void gt64120_writel (void *opaque, hwaddr addr,
case GT_PCI0M0LD:
s->regs[GT_PCI0M0LD] = val & 0x00007fff;
s->regs[GT_PCI0M0REMAP] = val & 0x000007ff;
+ gt64120_pci_mapping(s);
break;
case GT_PCI0M1LD:
s->regs[GT_PCI0M1LD] = val & 0x00007fff;
s->regs[GT_PCI0M1REMAP] = val & 0x000007ff;
+ gt64120_pci_mapping(s);
break;
case GT_PCI1IOLD:
s->regs[GT_PCI1IOLD] = val & 0x00007fff;
@@ -380,12 +424,12 @@ static void gt64120_writel (void *opaque, hwaddr addr,
s->regs[GT_PCI1M1LD] = val & 0x00007fff;
s->regs[GT_PCI1M1REMAP] = val & 0x000007ff;
break;
+ case GT_PCI0M0HD:
+ case GT_PCI0M1HD:
case GT_PCI0IOHD:
s->regs[saddr] = val & 0x0000007f;
gt64120_pci_mapping(s);
break;
- case GT_PCI0M0HD:
- case GT_PCI0M1HD:
case GT_PCI1IOHD:
case GT_PCI1M0HD:
case GT_PCI1M1HD:
@@ -1124,10 +1168,12 @@ PCIBus *gt64120_register(qemu_irq *pic)
qdev_init_nofail(dev);
d = GT64120_PCI_HOST_BRIDGE(dev);
phb = PCI_HOST_BRIDGE(dev);
+ memory_region_init(&d->pci0_mem, OBJECT(dev), "pci0-mem", UINT32_MAX);
+ address_space_init(&d->pci0_mem_as, &d->pci0_mem, "pci0-mem");
phb->bus = pci_register_bus(dev, "pci",
gt64120_pci_set_irq, gt64120_pci_map_irq,
pic,
- get_system_memory(),
+ &d->pci0_mem,
get_system_io(),
PCI_DEVFN(18, 0), 4, TYPE_PCI_BUS);
memory_region_init_io(&d->ISD_mem, OBJECT(dev), &isd_mem_ops, d, "isd-mem", 0x1000);
@@ -1142,11 +1188,6 @@ static int gt64120_init(SysBusDevice *dev)
s = GT64120_PCI_HOST_BRIDGE(dev);
- /* FIXME: This value is computed from registers during reset, but some
- devices (e.g. VGA card) need to know it when they are registered.
- This also mean that changing the register to change the mapping
- does not fully work. */
- isa_mem_base = 0x10000000;
qemu_register_reset(gt64120_reset, s);
return 0;
}
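For reference, the GT-64120 low/high decode registers express PCI window base and top in 2 MiB units, which is why the mapping code above shifts them left by 21. A standalone example of the arithmetic (register values are made up):

    /* Standalone illustration of the GT-64120 window decode used above:
     * the LD/HD registers give base and top in 2 MiB units.  The register
     * values here are made-up examples. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ld = 0x08;    /* e.g. PCI0M0LD */
        uint32_t hd = 0x0f;    /* e.g. PCI0M0HD */

        uint64_t start  = (uint64_t)ld << 21;
        uint64_t length = (uint64_t)((hd + 1) - (ld & 0x7f)) << 21;

        /* start = 16 MiB, length = 16 MiB for these example values */
        printf("start=0x%" PRIx64 " length=0x%" PRIx64 "\n", start, length);
        return 0;
    }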
diff --git a/hw/mips/mips_jazz.c b/hw/mips/mips_jazz.c
index 3f33093fd9..ef5dd7d5ab 100644
--- a/hw/mips/mips_jazz.c
+++ b/hw/mips/mips_jazz.c
@@ -60,13 +60,16 @@ static void main_cpu_reset(void *opaque)
static uint64_t rtc_read(void *opaque, hwaddr addr, unsigned size)
{
- return cpu_inw(0x71);
+ uint8_t val;
+ address_space_read(&address_space_memory, 0x90000071, &val, 1);
+ return val;
}
static void rtc_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- cpu_outw(0x71, val & 0xff);
+ uint8_t buf = val & 0xff;
+ address_space_write(&address_space_memory, 0x90000071, &buf, 1);
}
static const MemoryRegionOps rtc_ops = {
@@ -120,12 +123,11 @@ static void mips_jazz_do_unassigned_access(CPUState *cpu, hwaddr addr,
(*real_do_unassigned_access)(cpu, addr, is_write, is_exec, opaque, size);
}
-static void mips_jazz_init(MemoryRegion *address_space,
- MemoryRegion *address_space_io,
- ram_addr_t ram_size,
- const char *cpu_model,
+static void mips_jazz_init(MachineState *machine,
enum jazz_model_e jazz_model)
{
+ MemoryRegion *address_space = get_system_memory();
+ const char *cpu_model = machine->cpu_model;
char *filename;
int bios_size, n;
MIPSCPU *cpu;
@@ -134,7 +136,8 @@ static void mips_jazz_init(MemoryRegion *address_space,
qemu_irq *rc4030, *i8259;
rc4030_dma *dmas;
void* rc4030_opaque;
- MemoryRegion *isa = g_new(MemoryRegion, 1);
+ MemoryRegion *isa_mem = g_new(MemoryRegion, 1);
+ MemoryRegion *isa_io = g_new(MemoryRegion, 1);
MemoryRegion *rtc = g_new(MemoryRegion, 1);
MemoryRegion *i8042 = g_new(MemoryRegion, 1);
MemoryRegion *dma_dummy = g_new(MemoryRegion, 1);
@@ -179,7 +182,8 @@ static void mips_jazz_init(MemoryRegion *address_space,
cc->do_unassigned_access = mips_jazz_do_unassigned_access;
/* allocate RAM */
- memory_region_init_ram(ram, NULL, "mips_jazz.ram", ram_size, &error_abort);
+ memory_region_init_ram(ram, NULL, "mips_jazz.ram", machine->ram_size,
+ &error_abort);
vmstate_register_ram_global(ram);
memory_region_add_subregion(address_space, 0, ram);
@@ -218,8 +222,14 @@ static void mips_jazz_init(MemoryRegion *address_space,
memory_region_init_io(dma_dummy, NULL, &dma_dummy_ops, NULL, "dummy_dma", 0x1000);
memory_region_add_subregion(address_space, 0x8000d000, dma_dummy);
+ /* ISA bus: IO space at 0x90000000, mem space at 0x91000000 */
+ memory_region_init(isa_io, NULL, "isa-io", 0x00010000);
+ memory_region_init(isa_mem, NULL, "isa-mem", 0x01000000);
+ memory_region_add_subregion(address_space, 0x90000000, isa_io);
+ memory_region_add_subregion(address_space, 0x91000000, isa_mem);
+ isa_bus = isa_bus_new(NULL, isa_mem, isa_io);
+
/* ISA devices */
- isa_bus = isa_bus_new(NULL, address_space_io);
i8259 = i8259_init(isa_bus, env->irq[4]);
isa_bus_irqs(isa_bus, i8259);
cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1);
@@ -227,12 +237,6 @@ static void mips_jazz_init(MemoryRegion *address_space,
pit = pit_init(isa_bus, 0x40, 0, NULL);
pcspk_init(isa_bus, pit);
- /* ISA IO space at 0x90000000 */
- memory_region_init_alias(isa, NULL, "isa_mmio",
- get_system_io(), 0, 0x01000000);
- memory_region_add_subregion(address_space, 0x90000000, isa);
- isa_mem_base = 0x11000000;
-
/* Video card */
switch (jazz_model) {
case JAZZ_MAGNUM:
@@ -333,19 +337,13 @@ static void mips_jazz_init(MemoryRegion *address_space,
static
void mips_magnum_init(MachineState *machine)
{
- ram_addr_t ram_size = machine->ram_size;
- const char *cpu_model = machine->cpu_model;
- mips_jazz_init(get_system_memory(), get_system_io(),
- ram_size, cpu_model, JAZZ_MAGNUM);
+ mips_jazz_init(machine, JAZZ_MAGNUM);
}
static
void mips_pica61_init(MachineState *machine)
{
- ram_addr_t ram_size = machine->ram_size;
- const char *cpu_model = machine->cpu_model;
- mips_jazz_init(get_system_memory(), get_system_io(),
- ram_size, cpu_model, JAZZ_PICA61);
+ mips_jazz_init(machine, JAZZ_PICA61);
}
static QEMUMachine mips_magnum_machine = {
diff --git a/hw/mips/mips_r4k.c b/hw/mips/mips_r4k.c
index a7fe0ceadf..3e90e273dc 100644
--- a/hw/mips/mips_r4k.c
+++ b/hw/mips/mips_r4k.c
@@ -165,7 +165,8 @@ void mips_r4k_init(MachineState *machine)
MemoryRegion *ram = g_new(MemoryRegion, 1);
MemoryRegion *bios;
MemoryRegion *iomem = g_new(MemoryRegion, 1);
- MemoryRegion *isa = g_new(MemoryRegion, 1);
+ MemoryRegion *isa_io = g_new(MemoryRegion, 1);
+ MemoryRegion *isa_mem = g_new(MemoryRegion, 1);
int bios_size;
MIPSCPU *cpu;
CPUMIPSState *env;
@@ -267,20 +268,20 @@ void mips_r4k_init(MachineState *machine)
cpu_mips_irq_init_cpu(env);
cpu_mips_clock_init(env);
+ /* ISA bus: IO space at 0x14000000, mem space at 0x10000000 */
+ memory_region_init_alias(isa_io, NULL, "isa-io",
+ get_system_io(), 0, 0x00010000);
+ memory_region_init(isa_mem, NULL, "isa-mem", 0x01000000);
+ memory_region_add_subregion(get_system_memory(), 0x14000000, isa_io);
+ memory_region_add_subregion(get_system_memory(), 0x10000000, isa_mem);
+ isa_bus = isa_bus_new(NULL, isa_mem, get_system_io());
+
/* The PIC is attached to the MIPS CPU INT0 pin */
- isa_bus = isa_bus_new(NULL, get_system_io());
i8259 = i8259_init(isa_bus, env->irq[2]);
isa_bus_irqs(isa_bus, i8259);
rtc_init(isa_bus, 2000, NULL);
- /* Register 64 KB of ISA IO space at 0x14000000 */
- memory_region_init_alias(isa, NULL, "isa_mmio",
- get_system_io(), 0, 0x00010000);
- memory_region_add_subregion(get_system_memory(), 0x14000000, isa);
-
- isa_mem_base = 0x10000000;
-
pit = pit_init(isa_bus, 0x40, 0, NULL);
for(i = 0; i < MAX_SERIAL_PORTS; i++) {
diff --git a/hw/net/pcnet-pci.c b/hw/net/pcnet-pci.c
index b86bc0d79b..5df79f0a1f 100644
--- a/hw/net/pcnet-pci.c
+++ b/hw/net/pcnet-pci.c
@@ -33,6 +33,7 @@
#include "qemu/timer.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
+#include "trace.h"
#include "pcnet.h"
@@ -61,9 +62,8 @@ typedef struct {
static void pcnet_aprom_writeb(void *opaque, uint32_t addr, uint32_t val)
{
PCNetState *s = opaque;
-#ifdef PCNET_DEBUG
- printf("pcnet_aprom_writeb addr=0x%08x val=0x%02x\n", addr, val);
-#endif
+
+ trace_pcnet_aprom_writeb(opaque, addr, val);
if (BCR_APROMWE(s)) {
s->prom[addr & 15] = val;
}
@@ -73,9 +73,8 @@ static uint32_t pcnet_aprom_readb(void *opaque, uint32_t addr)
{
PCNetState *s = opaque;
uint32_t val = s->prom[addr & 15];
-#ifdef PCNET_DEBUG
- printf("pcnet_aprom_readb addr=0x%08x val=0x%02x\n", addr, val);
-#endif
+
+ trace_pcnet_aprom_readb(opaque, addr, val);
return val;
}
@@ -84,6 +83,7 @@ static uint64_t pcnet_ioport_read(void *opaque, hwaddr addr,
{
PCNetState *d = opaque;
+ trace_pcnet_ioport_read(opaque, addr, size);
if (addr < 0x10) {
if (!BCR_DWIO(d) && size == 1) {
return pcnet_aprom_readb(d, addr);
@@ -111,6 +111,7 @@ static void pcnet_ioport_write(void *opaque, hwaddr addr,
{
PCNetState *d = opaque;
+ trace_pcnet_ioport_write(opaque, addr, data, size);
if (addr < 0x10) {
if (!BCR_DWIO(d) && size == 1) {
pcnet_aprom_writeb(d, addr, data);
@@ -141,10 +142,8 @@ static const MemoryRegionOps pcnet_io_ops = {
static void pcnet_mmio_writeb(void *opaque, hwaddr addr, uint32_t val)
{
PCNetState *d = opaque;
-#ifdef PCNET_DEBUG_IO
- printf("pcnet_mmio_writeb addr=0x" TARGET_FMT_plx" val=0x%02x\n", addr,
- val);
-#endif
+
+ trace_pcnet_mmio_writeb(opaque, addr, val);
if (!(addr & 0x10))
pcnet_aprom_writeb(d, addr & 0x0f, val);
}
@@ -153,22 +152,18 @@ static uint32_t pcnet_mmio_readb(void *opaque, hwaddr addr)
{
PCNetState *d = opaque;
uint32_t val = -1;
+
if (!(addr & 0x10))
val = pcnet_aprom_readb(d, addr & 0x0f);
-#ifdef PCNET_DEBUG_IO
- printf("pcnet_mmio_readb addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr,
- val & 0xff);
-#endif
+ trace_pcnet_mmio_readb(opaque, addr, val);
return val;
}
static void pcnet_mmio_writew(void *opaque, hwaddr addr, uint32_t val)
{
PCNetState *d = opaque;
-#ifdef PCNET_DEBUG_IO
- printf("pcnet_mmio_writew addr=0x" TARGET_FMT_plx " val=0x%04x\n", addr,
- val);
-#endif
+
+ trace_pcnet_mmio_writew(opaque, addr, val);
if (addr & 0x10)
pcnet_ioport_writew(d, addr & 0x0f, val);
else {
@@ -182,6 +177,7 @@ static uint32_t pcnet_mmio_readw(void *opaque, hwaddr addr)
{
PCNetState *d = opaque;
uint32_t val = -1;
+
if (addr & 0x10)
val = pcnet_ioport_readw(d, addr & 0x0f);
else {
@@ -190,20 +186,15 @@ static uint32_t pcnet_mmio_readw(void *opaque, hwaddr addr)
val <<= 8;
val |= pcnet_aprom_readb(d, addr);
}
-#ifdef PCNET_DEBUG_IO
- printf("pcnet_mmio_readw addr=0x" TARGET_FMT_plx" val = 0x%04x\n", addr,
- val & 0xffff);
-#endif
+ trace_pcnet_mmio_readw(opaque, addr, val);
return val;
}
static void pcnet_mmio_writel(void *opaque, hwaddr addr, uint32_t val)
{
PCNetState *d = opaque;
-#ifdef PCNET_DEBUG_IO
- printf("pcnet_mmio_writel addr=0x" TARGET_FMT_plx" val=0x%08x\n", addr,
- val);
-#endif
+
+ trace_pcnet_mmio_writel(opaque, addr, val);
if (addr & 0x10)
pcnet_ioport_writel(d, addr & 0x0f, val);
else {
@@ -219,6 +210,7 @@ static uint32_t pcnet_mmio_readl(void *opaque, hwaddr addr)
{
PCNetState *d = opaque;
uint32_t val;
+
if (addr & 0x10)
val = pcnet_ioport_readl(d, addr & 0x0f);
else {
@@ -231,10 +223,7 @@ static uint32_t pcnet_mmio_readl(void *opaque, hwaddr addr)
val <<= 8;
val |= pcnet_aprom_readb(d, addr);
}
-#ifdef PCNET_DEBUG_IO
- printf("pcnet_mmio_readl addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr,
- val);
-#endif
+ trace_pcnet_mmio_readl(opaque, addr, val);
return val;
}
diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
index 8486b80bb7..7778b9ad47 100644
--- a/hw/net/pcnet.c
+++ b/hw/net/pcnet.c
@@ -40,6 +40,7 @@
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "sysemu/sysemu.h"
+#include "trace.h"
#include "pcnet.h"
@@ -685,9 +686,7 @@ static void pcnet_bcr_writew(PCNetState *s, uint32_t rap, uint32_t val);
static void pcnet_s_reset(PCNetState *s)
{
-#ifdef PCNET_DEBUG
- printf("pcnet_s_reset\n");
-#endif
+ trace_pcnet_s_reset(s);
s->rdra = 0;
s->tdra = 0;
@@ -760,9 +759,7 @@ static void pcnet_update_irq(PCNetState *s)
s->csr[4] |= 0x0040;
s->csr[0] |= 0x0080;
isr = 1;
-#ifdef PCNET_DEBUG
- printf("pcnet user int\n");
-#endif
+ trace_pcnet_user_int(s);
}
#if 1
@@ -777,9 +774,7 @@ static void pcnet_update_irq(PCNetState *s)
}
if (isr != s->isr) {
-#ifdef PCNET_DEBUG
- printf("pcnet: INTA=%d\n", isr);
-#endif
+ trace_pcnet_isr_change(s, isr, s->isr);
}
qemu_set_irq(s->irq, isr);
s->isr = isr;
@@ -791,9 +786,7 @@ static void pcnet_init(PCNetState *s)
uint16_t padr[3], ladrf[4], mode;
uint32_t rdra, tdra;
-#ifdef PCNET_DEBUG
- printf("pcnet_init init_addr=0x%08x\n", PHYSADDR(s,CSR_IADR(s)));
-#endif
+ trace_pcnet_init(s, PHYSADDR(s, CSR_IADR(s)));
if (BCR_SSIZE32(s)) {
struct pcnet_initblk32 initblk;
@@ -831,9 +824,7 @@ static void pcnet_init(PCNetState *s)
tdra &= 0x00ffffff;
}
-#if defined(PCNET_DEBUG)
- printf("rlen=%d tlen=%d\n", rlen, tlen);
-#endif
+ trace_pcnet_rlen_tlen(s, rlen, tlen);
CSR_RCVRL(s) = (rlen < 9) ? (1 << rlen) : 512;
CSR_XMTRL(s) = (tlen < 9) ? (1 << tlen) : 512;
@@ -852,11 +843,8 @@ static void pcnet_init(PCNetState *s)
CSR_RCVRC(s) = CSR_RCVRL(s);
CSR_XMTRC(s) = CSR_XMTRL(s);
-#ifdef PCNET_DEBUG
- printf("pcnet ss32=%d rdra=0x%08x[%d] tdra=0x%08x[%d]\n",
- BCR_SSIZE32(s),
- s->rdra, CSR_RCVRL(s), s->tdra, CSR_XMTRL(s));
-#endif
+ trace_pcnet_ss32_rdra_tdra(s, BCR_SSIZE32(s),
+ s->rdra, CSR_RCVRL(s), s->tdra, CSR_XMTRL(s));
s->csr[0] |= 0x0101;
s->csr[0] &= ~0x0004; /* clear STOP bit */
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index b7b87a60cf..2d1be06f41 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -2075,20 +2075,6 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
"length to %d\n", txsize);
}
- if (!s->cplus_txbuffer)
- {
- /* out of memory */
-
- DPRINTF("+++ C+ mode transmiter failed to reallocate %d bytes\n",
- s->cplus_txbuffer_len);
-
- /* update tally counter */
- ++s->tally_counters.TxERR;
- ++s->tally_counters.TxAbt;
-
- return 0;
- }
-
/* append more data to the packet */
DPRINTF("+++ C+ mode transmit reading %d bytes from host memory at "
diff --git a/hw/pci-host/Makefile.objs b/hw/pci-host/Makefile.objs
index bb65f9c4d2..45f1f0ebab 100644
--- a/hw/pci-host/Makefile.objs
+++ b/hw/pci-host/Makefile.objs
@@ -15,3 +15,4 @@ common-obj-$(CONFIG_PCI_APB) += apb.o
common-obj-$(CONFIG_FULONG) += bonito.o
common-obj-$(CONFIG_PCI_PIIX) += piix.o
common-obj-$(CONFIG_PCI_Q35) += q35.o
+common-obj-$(CONFIG_PCI_GENERIC) += gpex.o
diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c
new file mode 100644
index 0000000000..9d8fb5a496
--- /dev/null
+++ b/hw/pci-host/gpex.c
@@ -0,0 +1,154 @@
+/*
+ * QEMU Generic PCI Express Bridge Emulation
+ *
+ * Copyright (C) 2015 Alexander Graf <agraf@suse.de>
+ *
+ * Code loosely based on q35.c.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Check out these documents for more information on the device:
+ *
+ * http://www.kernel.org/doc/Documentation/devicetree/bindings/pci/host-generic-pci.txt
+ * http://www.firmware.org/1275/practice/imap/imap0_9d.pdf
+ */
+#include "hw/hw.h"
+#include "hw/pci-host/gpex.h"
+
+/****************************************************************************
+ * GPEX host
+ */
+
+static void gpex_set_irq(void *opaque, int irq_num, int level)
+{
+ GPEXHost *s = opaque;
+
+ qemu_set_irq(s->irq[irq_num], level);
+}
+
+static void gpex_host_realize(DeviceState *dev, Error **errp)
+{
+ PCIHostState *pci = PCI_HOST_BRIDGE(dev);
+ GPEXHost *s = GPEX_HOST(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev);
+ int i;
+
+ pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX);
+ memory_region_init(&s->io_mmio, OBJECT(s), "gpex_mmio", UINT64_MAX);
+ memory_region_init(&s->io_ioport, OBJECT(s), "gpex_ioport", 64 * 1024);
+
+ sysbus_init_mmio(sbd, &pex->mmio);
+ sysbus_init_mmio(sbd, &s->io_mmio);
+ sysbus_init_mmio(sbd, &s->io_ioport);
+ for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ sysbus_init_irq(sbd, &s->irq[i]);
+ }
+
+ pci->bus = pci_register_bus(dev, "pcie.0", gpex_set_irq,
+ pci_swizzle_map_irq_fn, s, &s->io_mmio,
+ &s->io_ioport, 0, 4, TYPE_PCIE_BUS);
+
+ qdev_set_parent_bus(DEVICE(&s->gpex_root), BUS(pci->bus));
+ qdev_init_nofail(DEVICE(&s->gpex_root));
+}
+
+static const char *gpex_host_root_bus_path(PCIHostState *host_bridge,
+ PCIBus *rootbus)
+{
+ return "0000:00";
+}
+
+static void gpex_host_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
+
+ hc->root_bus_path = gpex_host_root_bus_path;
+ dc->realize = gpex_host_realize;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->fw_name = "pci";
+}
+
+static void gpex_host_initfn(Object *obj)
+{
+ GPEXHost *s = GPEX_HOST(obj);
+ GPEXRootState *root = &s->gpex_root;
+
+ object_initialize(root, sizeof(*root), TYPE_GPEX_ROOT_DEVICE);
+ object_property_add_child(obj, "gpex_root", OBJECT(root), NULL);
+ qdev_prop_set_uint32(DEVICE(root), "addr", PCI_DEVFN(0, 0));
+ qdev_prop_set_bit(DEVICE(root), "multifunction", false);
+}
+
+static const TypeInfo gpex_host_info = {
+ .name = TYPE_GPEX_HOST,
+ .parent = TYPE_PCIE_HOST_BRIDGE,
+ .instance_size = sizeof(GPEXHost),
+ .instance_init = gpex_host_initfn,
+ .class_init = gpex_host_class_init,
+};
+
+/****************************************************************************
+ * GPEX Root D0:F0
+ */
+
+static const VMStateDescription vmstate_gpex_root = {
+ .name = "gpex_root",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, GPEXRootState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void gpex_root_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->desc = "QEMU generic PCIe host bridge";
+ dc->vmsd = &vmstate_gpex_root;
+ k->vendor_id = PCI_VENDOR_ID_REDHAT;
+ k->device_id = PCI_DEVICE_ID_REDHAT_PCIE_HOST;
+ k->revision = 0;
+ k->class_id = PCI_CLASS_BRIDGE_HOST;
+ /*
+ * PCI-facing part of the host bridge, not usable without the
+ * host-facing part, which can't be device_add'ed, yet.
+ */
+ dc->cannot_instantiate_with_device_add_yet = true;
+}
+
+static const TypeInfo gpex_root_info = {
+ .name = TYPE_GPEX_ROOT_DEVICE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(GPEXRootState),
+ .class_init = gpex_root_class_init,
+};
+
+static void gpex_register(void)
+{
+ type_register_static(&gpex_root_info);
+ type_register_static(&gpex_host_info);
+}
+
+type_init(gpex_register)
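For reference, gpex_host_realize() passes pci_swizzle_map_irq_fn as the IRQ mapping function, i.e. the conventional PCI INTx swizzle where a device's interrupt pin is rotated by its slot number onto the bridge's four lines. A standalone illustration of that formula (believed to match the helper, shown here only as a sketch):

    /* Standalone illustration of the conventional PCI INTx swizzle that
     * pci_swizzle_map_irq_fn is expected to implement: pin 0..3 of the
     * device in a given slot is rotated onto one of the four host lines. */
    #include <stdio.h>

    static int swizzle(int slot, int pin)
    {
        return (slot + pin) % 4;
    }

    int main(void)
    {
        /* slot 2: INTA -> line 2, INTB -> line 3, INTC -> line 0 */
        printf("%d %d %d\n", swizzle(2, 0), swizzle(2, 1), swizzle(2, 2));
        return 0;
    }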
diff --git a/hw/pci-host/piix.c b/hw/pci-host/piix.c
index 1530038cb0..8ea718e18e 100644
--- a/hw/pci-host/piix.c
+++ b/hw/pci-host/piix.c
@@ -635,7 +635,8 @@ static int piix3_initfn(PCIDevice *dev)
{
PIIX3State *d = DO_UPCAST(PIIX3State, dev, dev);
- isa_bus_new(DEVICE(d), pci_address_space_io(dev));
+ isa_bus_new(DEVICE(d), get_system_memory(),
+ pci_address_space_io(dev));
memory_region_init_io(&d->rcr_mem, OBJECT(dev), &rcr_ops, d,
"piix3-reset-control", 1);
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index b560459e83..812d03054d 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1625,7 +1625,7 @@ static int spapr_kvm_type(const char *vm_type)
}
/*
- * Implementation of an interface to adjust firmware patch
+ * Implementation of an interface to adjust firmware path
* for the bootindex property handling.
*/
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index dc9e46a7b1..032ee1a5f1 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -648,7 +648,7 @@ int spapr_populate_vdevice(VIOsPAPRBus *bus, void *fdt)
ret = 0;
out:
- free(qdevs);
+ g_free(qdevs);
return ret;
}
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 03a1e8cfcf..418d73b1b4 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -94,7 +94,7 @@ void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req)
{
VirtIODevice *vdev = VIRTIO_DEVICE(req->vring->parent);
- vring_push(&req->vring->vring, &req->elem,
+ vring_push(vdev, &req->vring->vring, &req->elem,
req->qsgl.size + req->resp_iov.size);
if (vring_should_notify(vdev, &req->vring->vring)) {
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index 3ff5bd8871..4620cc613a 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -596,7 +596,8 @@ pci_ebus_init1(PCIDevice *pci_dev)
{
EbusState *s = DO_UPCAST(EbusState, pci_dev, pci_dev);
- isa_bus_new(&pci_dev->qdev, pci_address_space_io(pci_dev));
+ isa_bus_new(DEVICE(pci_dev), get_system_memory(),
+ pci_address_space_io(pci_dev));
pci_dev->config[0x04] = 0x06; // command = bus master, pci mem
pci_dev->config[0x05] = 0x00;
diff --git a/hw/usb/desc-msos.c b/hw/usb/desc-msos.c
index 334d1aea8d..32c3600df8 100644
--- a/hw/usb/desc-msos.c
+++ b/hw/usb/desc-msos.c
@@ -231,7 +231,7 @@ int usb_desc_msos(const USBDesc *desc, USBPacket *p,
length = len;
}
memcpy(dest, buf, length);
- free(buf);
+ g_free(buf);
p->actual_length = length;
return 0;
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index d21c397756..19b224a44d 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -2,7 +2,7 @@ common-obj-y += virtio-rng.o
common-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o
common-obj-y += virtio-bus.o
common-obj-y += virtio-mmio.o
-common-obj-$(CONFIG_VIRTIO) += dataplane/
+obj-$(CONFIG_VIRTIO) += dataplane/
obj-y += virtio.o virtio-balloon.o
obj-$(CONFIG_LINUX) += vhost.o vhost-backend.o vhost-user.o
diff --git a/hw/virtio/dataplane/Makefile.objs b/hw/virtio/dataplane/Makefile.objs
index 9a8cfc0297..753a9cab44 100644
--- a/hw/virtio/dataplane/Makefile.objs
+++ b/hw/virtio/dataplane/Makefile.objs
@@ -1 +1 @@
-common-obj-y += vring.o
+obj-y += vring.o
diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
index 78c6f45a07..0936f659e5 100644
--- a/hw/virtio/dataplane/vring.c
+++ b/hw/virtio/dataplane/vring.c
@@ -18,7 +18,9 @@
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
+#include "hw/virtio/virtio-access.h"
#include "hw/virtio/dataplane/vring.h"
+#include "hw/virtio/dataplane/vring-accessors.h"
#include "qemu/error-report.h"
/* vring_map can be coupled with vring_unmap or (if you still have the
@@ -83,7 +85,7 @@ bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
vring_init(&vring->vr, virtio_queue_get_num(vdev, n), vring_ptr, 4096);
vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
- vring->last_used_idx = vring->vr.used->idx;
+ vring->last_used_idx = vring_get_used_idx(vdev, vring);
vring->signalled_used = 0;
vring->signalled_used_valid = false;
@@ -104,7 +106,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
{
if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
- vring->vr.used->flags |= VRING_USED_F_NO_NOTIFY;
+ vring_set_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
}
}
@@ -117,10 +119,10 @@ bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
vring_avail_event(&vring->vr) = vring->vr.avail->idx;
} else {
- vring->vr.used->flags &= ~VRING_USED_F_NO_NOTIFY;
+ vring_clear_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
}
smp_mb(); /* ensure update is seen before reading avail_idx */
- return !vring_more_avail(vring);
+ return !vring_more_avail(vdev, vring);
}
/* This is stolen from linux/drivers/vhost/vhost.c:vhost_notify() */
@@ -134,12 +136,13 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
smp_mb();
if ((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
- unlikely(vring->vr.avail->idx == vring->last_avail_idx)) {
+ unlikely(!vring_more_avail(vdev, vring))) {
return true;
}
if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
- return !(vring->vr.avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
+ return !(vring_get_avail_flags(vdev, vring) &
+ VRING_AVAIL_F_NO_INTERRUPT);
}
old = vring->signalled_used;
v = vring->signalled_used_valid;
@@ -202,9 +205,19 @@ static int get_desc(Vring *vring, VirtQueueElement *elem,
return 0;
}
+static void copy_in_vring_desc(VirtIODevice *vdev,
+ const struct vring_desc *guest,
+ struct vring_desc *host)
+{
+ host->addr = virtio_ldq_p(vdev, &guest->addr);
+ host->len = virtio_ldl_p(vdev, &guest->len);
+ host->flags = virtio_lduw_p(vdev, &guest->flags);
+ host->next = virtio_lduw_p(vdev, &guest->next);
+}
+
/* This is stolen from linux/drivers/vhost/vhost.c. */
-static int get_indirect(Vring *vring, VirtQueueElement *elem,
- struct vring_desc *indirect)
+static int get_indirect(VirtIODevice *vdev, Vring *vring,
+ VirtQueueElement *elem, struct vring_desc *indirect)
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
@@ -244,7 +257,7 @@ static int get_indirect(Vring *vring, VirtQueueElement *elem,
vring->broken = true;
return -EFAULT;
}
- desc = *desc_ptr;
+ copy_in_vring_desc(vdev, desc_ptr, &desc);
memory_region_unref(mr);
/* Ensure descriptor has been loaded before accessing fields */
@@ -320,7 +333,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vring->last_avail_idx;
- avail_idx = vring->vr.avail->idx;
+ avail_idx = vring_get_avail_idx(vdev, vring);
barrier(); /* load indices now and not again later */
if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
@@ -341,7 +354,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- head = vring->vr.avail->ring[last_avail_idx % num];
+ head = vring_get_avail_ring(vdev, vring, last_avail_idx % num);
elem->index = head;
@@ -365,13 +378,13 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
ret = -EFAULT;
goto out;
}
- desc = vring->vr.desc[i];
+ copy_in_vring_desc(vdev, &vring->vr.desc[i], &desc);
/* Ensure descriptor is loaded before accessing fields */
barrier();
if (desc.flags & VRING_DESC_F_INDIRECT) {
- ret = get_indirect(vring, elem, &desc);
+ ret = get_indirect(vdev, vring, elem, &desc);
if (ret < 0) {
goto out;
}
@@ -407,9 +420,9 @@ out:
*
* Stolen from linux/drivers/vhost/vhost.c.
*/
-void vring_push(Vring *vring, VirtQueueElement *elem, int len)
+void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
+ int len)
{
- struct vring_used_elem *used;
unsigned int head = elem->index;
uint16_t new;
@@ -422,14 +435,16 @@ void vring_push(Vring *vring, VirtQueueElement *elem, int len)
/* The virtqueue contains a ring of used buffers. Get a pointer to the
* next entry in that used ring. */
- used = &vring->vr.used->ring[vring->last_used_idx % vring->vr.num];
- used->id = head;
- used->len = len;
+ vring_set_used_ring_id(vdev, vring, vring->last_used_idx % vring->vr.num,
+ head);
+ vring_set_used_ring_len(vdev, vring, vring->last_used_idx % vring->vr.num,
+ len);
/* Make sure buffer is written before we update index. */
smp_wmb();
- new = vring->vr.used->idx = ++vring->last_used_idx;
+ new = ++vring->last_used_idx;
+ vring_set_used_idx(vdev, vring, new);
if (unlikely((int16_t)(new - vring->signalled_used) < (uint16_t)1)) {
vring->signalled_used_valid = false;
}
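For reference, the new vring accessors route every guest ring field through virtio_lduw_p/virtio_ldq_p and friends so the value is byte-swapped whenever the device's endianness (guest-native for legacy virtio) differs from the host's. A standalone sketch of that idea, using a local helper instead of the real accessors:

    /* Standalone sketch of what a virtio_lduw_p-style accessor does for a
     * legacy (guest-endian) device: swap only if guest and host endianness
     * differ.  Simplified; the real helpers take the VirtIODevice and decide
     * the target endianness themselves. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t swap16(uint16_t v)
    {
        return (uint16_t)((v >> 8) | (v << 8));
    }

    static uint16_t sketch_lduw(const uint16_t *p, bool guest_big, bool host_big)
    {
        uint16_t v = *p;
        return guest_big == host_big ? v : swap16(v);
    }

    int main(void)
    {
        uint16_t idx = 0x0102;

        printf("0x%04x 0x%04x\n",
               sketch_lduw(&idx, false, false),  /* same endianness: 0x0102 */
               sketch_lduw(&idx, true, false));  /* mismatch: 0x0201 */
        return 0;
    }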
diff --git a/include/block/accounting.h b/include/block/accounting.h
index 50b42b3808..4c406cff7a 100644
--- a/include/block/accounting.h
+++ b/include/block/accounting.h
@@ -39,6 +39,7 @@ typedef struct BlockAcctStats {
uint64_t nr_bytes[BLOCK_MAX_IOTYPE];
uint64_t nr_ops[BLOCK_MAX_IOTYPE];
uint64_t total_time_ns[BLOCK_MAX_IOTYPE];
+ uint64_t merged[BLOCK_MAX_IOTYPE];
uint64_t wr_highest_sector;
} BlockAcctStats;
@@ -53,5 +54,7 @@ void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie);
void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num,
unsigned int nb_sectors);
+void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type,
+ int num_requests);
#endif
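The new merged[] counter presumably accumulates how many requests were folded into others before submission. A hypothetical sketch of block_acct_merge_done() (the real body lives in block/accounting.c and may differ):

    /* Hypothetical sketch; the real implementation in block/accounting.c
     * may differ in detail. */
    void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type,
                               int num_requests)
    {
        assert(type < BLOCK_MAX_IOTYPE);
        stats->merged[type] += num_requests;
    }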
diff --git a/include/block/block.h b/include/block/block.h
index 3082d2b80e..471d11dbbe 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -83,6 +83,9 @@ typedef enum {
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK ~(BDRV_SECTOR_SIZE - 1)
+#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
+ INT_MAX >> BDRV_SECTOR_BITS)
+
/*
* Allocation status flags
* BDRV_BLOCK_DATA: data is read from bs->file or another file
@@ -165,7 +168,8 @@ void bdrv_io_limits_disable(BlockDriverState *bs);
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
BlockDriver *bdrv_find_protocol(const char *filename,
- bool allow_protocol_prefix);
+ bool allow_protocol_prefix,
+ Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
bool readonly);
@@ -378,6 +382,7 @@ BlockDriverState *bdrv_next(BlockDriverState *bs);
int bdrv_is_encrypted(BlockDriverState *bs);
int bdrv_key_required(BlockDriverState *bs);
int bdrv_set_key(BlockDriverState *bs, const char *key);
+void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp);
int bdrv_query_missing_keys(void);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
void *opaque);
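For reference, BDRV_REQUEST_MAX_SECTORS caps a single request so that its byte size fits both size_t and int; on a typical LP64 host both operands reduce to INT_MAX >> 9 = 4194303 sectors, i.e. just under 2 GiB. A quick standalone check of that arithmetic:

    /* Quick standalone check of the new cap on an LP64 host: both operands
     * reduce to INT_MAX >> 9 = 4194303 sectors, just under 2 GiB. */
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_BITS 9
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint64_t max_sectors = MIN(SIZE_MAX >> SECTOR_BITS,
                                   INT_MAX >> SECTOR_BITS);

        printf("%llu sectors, %llu bytes\n",
               (unsigned long long)max_sectors,
               (unsigned long long)(max_sectors << SECTOR_BITS));
        return 0;
    }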
diff --git a/include/block/block_int.h b/include/block/block_int.h
index e264be97b2..b340e7e140 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -369,9 +369,6 @@ struct BlockDriverState {
/* I/O Limits */
BlockLimits bl;
- /* Whether the disk can expand beyond total_sectors */
- int growable;
-
/* Whether produces zeros when read beyond eof */
bool zero_beyond_eof;
@@ -412,6 +409,10 @@ struct BlockDriverState {
/* The error object in use for blocking operations on backing_hd */
Error *backing_blocker;
+
+ /* threshold limit for writes, in bytes. "High water mark". */
+ uint64_t write_threshold_offset;
+ NotifierWithReturn write_threshold_notifier;
};
diff --git a/include/block/nbd.h b/include/block/nbd.h
index 348302c90b..ca9a5ac5b3 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -75,7 +75,7 @@ enum {
ssize_t nbd_wr_sync(int fd, void *buffer, size_t size, bool do_read);
int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
- off_t *size, size_t *blocksize);
+ off_t *size, size_t *blocksize, Error **errp);
int nbd_init(int fd, int csock, uint32_t flags, off_t size, size_t blocksize);
ssize_t nbd_send_request(int csock, struct nbd_request *request);
ssize_t nbd_receive_reply(int csock, struct nbd_reply *reply);
@@ -99,7 +99,6 @@ void nbd_export_close_all(void);
NBDClient *nbd_client_new(NBDExport *exp, int csock,
void (*close)(NBDClient *));
-void nbd_client_close(NBDClient *client);
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);
diff --git a/include/block/write-threshold.h b/include/block/write-threshold.h
new file mode 100644
index 0000000000..f1b899cd5f
--- /dev/null
+++ b/include/block/write-threshold.h
@@ -0,0 +1,64 @@
+/*
+ * QEMU System Emulator block write threshold notification
+ *
+ * Copyright Red Hat, Inc. 2014
+ *
+ * Authors:
+ * Francesco Romani <fromani@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+#ifndef BLOCK_WRITE_THRESHOLD_H
+#define BLOCK_WRITE_THRESHOLD_H
+
+#include <stdint.h>
+
+#include "qemu/typedefs.h"
+#include "qemu-common.h"
+
+/*
+ * bdrv_write_threshold_set:
+ *
+ * Set the write threshold for block devices, in bytes.
+ * Notify when a write exceeds the threshold, meaning the device
+ * is becoming full, so it can be transparently resized.
+ * To be used with thin-provisioned block devices.
+ *
+ * Use threshold_bytes == 0 to disable.
+ */
+void bdrv_write_threshold_set(BlockDriverState *bs, uint64_t threshold_bytes);
+
+/*
+ * bdrv_write_threshold_get
+ *
+ * Get the configured write threshold, in bytes.
+ * Zero means no threshold configured.
+ */
+uint64_t bdrv_write_threshold_get(const BlockDriverState *bs);
+
+/*
+ * bdrv_write_threshold_is_set
+ *
+ * Tell if a write threshold is set for a given BDS.
+ */
+bool bdrv_write_threshold_is_set(const BlockDriverState *bs);
+
+/*
+ * bdrv_write_threshold_exceeded
+ *
+ * Return the extent by which a write request exceeds the threshold,
+ * or zero if the request is below the threshold.
+ * Zero is also returned if no threshold is set.
+ *
+ * NOTE: here we assume the following holds for each request this code
+ * deals with:
+ *
+ * assert((req->offset + req->bytes) <= UINT64_MAX)
+ *
+ * Please note there is *not* an actual C assert().
+ */
+uint64_t bdrv_write_threshold_exceeded(const BlockDriverState *bs,
+ const BdrvTrackedRequest *req);
+
+#endif
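The header above only declares the interface; a rough sketch of the intended call pattern follows. It assumes a tracked write request is available in the caller, and check_write_threshold() is a hypothetical helper rather than code from this series:

    #include "block/write-threshold.h"

    static void check_write_threshold(BlockDriverState *bs,
                                      const BdrvTrackedRequest *req)
    {
        uint64_t exceeded;

        if (!bdrv_write_threshold_is_set(bs)) {
            return;
        }
        exceeded = bdrv_write_threshold_exceeded(bs, req);
        if (exceeded > 0) {
            /* report the event, then disarm the one-shot threshold */
            bdrv_write_threshold_set(bs, 0);
        }
    }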
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 221aad0bfd..6e5b01239b 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -11,8 +11,8 @@ static int exitreq_label;
static inline void gen_tb_start(TranslationBlock *tb)
{
- TCGv_i32 count;
- TCGv_i32 flag;
+ TCGv_i32 count, flag, imm;
+ int i;
exitreq_label = gen_new_label();
flag = tcg_temp_new_i32();
@@ -21,16 +21,25 @@ static inline void gen_tb_start(TranslationBlock *tb)
tcg_gen_brcondi_i32(TCG_COND_NE, flag, 0, exitreq_label);
tcg_temp_free_i32(flag);
- if (!(tb->cflags & CF_USE_ICOUNT))
+ if (!(tb->cflags & CF_USE_ICOUNT)) {
return;
+ }
icount_label = gen_new_label();
count = tcg_temp_local_new_i32();
tcg_gen_ld_i32(count, cpu_env,
-ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
+
+ imm = tcg_temp_new_i32();
+ tcg_gen_movi_i32(imm, 0xdeadbeef);
+
/* This is a horrid hack to allow fixing up the value later. */
- icount_arg = tcg_ctx.gen_opparam_ptr + 1;
- tcg_gen_subi_i32(count, count, 0xdeadbeef);
+ i = tcg_ctx.gen_last_op_idx;
+ i = tcg_ctx.gen_op_buf[i].args;
+ icount_arg = &tcg_ctx.gen_opparam_buf[i + 1];
+
+ tcg_gen_sub_i32(count, count, imm);
+ tcg_temp_free_i32(imm);
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
tcg_gen_st16_i32(count, cpu_env,
@@ -48,6 +57,9 @@ static void gen_tb_end(TranslationBlock *tb, int num_insns)
gen_set_label(icount_label);
tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
}
+
+ /* Terminate the linked list. */
+ tcg_ctx.gen_op_buf[tcg_ctx.gen_last_op_idx].next = -1;
}
static inline void gen_io_start(void)
diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h
index e0c749f9e9..cf7bd343c7 100644
--- a/include/hw/isa/isa.h
+++ b/include/hw/isa/isa.h
@@ -36,6 +36,7 @@ struct ISABus {
BusState parent_obj;
/*< public >*/
+ MemoryRegion *address_space;
MemoryRegion *address_space_io;
qemu_irq *irqs;
};
@@ -50,7 +51,8 @@ struct ISADevice {
int ioport_id;
};
-ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space_io);
+ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space,
+ MemoryRegion *address_space_io);
void isa_bus_irqs(ISABus *bus, qemu_irq *irqs);
qemu_irq isa_get_irq(ISADevice *dev, int isairq);
void isa_init_irq(ISADevice *dev, qemu_irq *p, int isairq);
@@ -97,8 +99,6 @@ static inline ISABus *isa_bus_from_device(ISADevice *d)
return ISA_BUS(qdev_get_parent_bus(DEVICE(d)));
}
-extern hwaddr isa_mem_base;
-
/* dma.c */
int DMA_get_channel_mode (int nchan);
int DMA_read_memory (int nchan, void *buf, int pos, int size);
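With the extra MemoryRegion parameter, boards now tell the ISA bus which memory region ISA memory accesses should go through, alongside the I/O space. A hedged sketch of an updated caller; get_system_memory() and get_system_io() are the usual global regions and are an assumption about the board code, not part of this hunk:

    #include "hw/isa/isa.h"
    #include "exec/address-spaces.h"

    static ISABus *board_create_isa_bus(DeviceState *dev)
    {
        return isa_bus_new(dev, get_system_memory(), get_system_io());
    }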
diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
new file mode 100644
index 0000000000..68c93488c9
--- /dev/null
+++ b/include/hw/pci-host/gpex.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU Generic PCI Express Bridge Emulation
+ *
+ * Copyright (C) 2015 Alexander Graf <agraf@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#ifndef HW_GPEX_H
+#define HW_GPEX_H
+
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pcie_host.h"
+
+#define TYPE_GPEX_HOST "gpex-pcihost"
+#define GPEX_HOST(obj) \
+ OBJECT_CHECK(GPEXHost, (obj), TYPE_GPEX_HOST)
+
+#define TYPE_GPEX_ROOT_DEVICE "gpex-root"
+#define MCH_PCI_DEVICE(obj) \
+ OBJECT_CHECK(GPEXRootState, (obj), TYPE_GPEX_ROOT_DEVICE)
+
+#define GPEX_NUM_IRQS 4
+
+typedef struct GPEXRootState {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+} GPEXRootState;
+
+typedef struct GPEXHost {
+ /*< private >*/
+ PCIExpressHost parent_obj;
+ /*< public >*/
+
+ GPEXRootState gpex_root;
+
+ MemoryRegion io_ioport;
+ MemoryRegion io_mmio;
+ qemu_irq irq[GPEX_NUM_IRQS];
+} GPEXHost;
+
+#endif /* HW_GPEX_H */
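A board would instantiate this host as an ordinary sysbus device; the sketch below shows one plausible wiring. The region indices (0 = ECAM, 1 = MMIO, 2 = I/O) and the qdev calls are assumptions about the accompanying gpex.c, not facts taken from this header:

    #include "hw/pci-host/gpex.h"

    static DeviceState *board_create_gpex(hwaddr ecam, hwaddr mmio, hwaddr pio,
                                          qemu_irq *intx)
    {
        DeviceState *dev = qdev_create(NULL, TYPE_GPEX_HOST);
        int i;

        qdev_init_nofail(dev);
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ecam);
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, mmio);
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio);
        for (i = 0; i < GPEX_NUM_IRQS; i++) {
            sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, intx[i]);
        }
        return dev;
    }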
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index 97a83d362f..bdee464e61 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -89,6 +89,7 @@
#define PCI_DEVICE_ID_REDHAT_SERIAL4 0x0004
#define PCI_DEVICE_ID_REDHAT_TEST 0x0005
#define PCI_DEVICE_ID_REDHAT_SDHCI 0x0007
+#define PCI_DEVICE_ID_REDHAT_PCIE_HOST 0x0008
#define PCI_DEVICE_ID_REDHAT_QXL 0x0100
#define FMT_PCIBUS PRIx64
diff --git a/include/hw/virtio/dataplane/vring-accessors.h b/include/hw/virtio/dataplane/vring-accessors.h
new file mode 100644
index 0000000000..b508b87900
--- /dev/null
+++ b/include/hw/virtio/dataplane/vring-accessors.h
@@ -0,0 +1,75 @@
+#ifndef VRING_ACCESSORS_H
+#define VRING_ACCESSORS_H
+
+#include "hw/virtio/virtio_ring.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-access.h"
+
+static inline uint16_t vring_get_used_idx(VirtIODevice *vdev, Vring *vring)
+{
+ return virtio_tswap16(vdev, vring->vr.used->idx);
+}
+
+static inline void vring_set_used_idx(VirtIODevice *vdev, Vring *vring,
+ uint16_t idx)
+{
+ vring->vr.used->idx = virtio_tswap16(vdev, idx);
+}
+
+static inline uint16_t vring_get_avail_idx(VirtIODevice *vdev, Vring *vring)
+{
+ return virtio_tswap16(vdev, vring->vr.avail->idx);
+}
+
+static inline uint16_t vring_get_avail_ring(VirtIODevice *vdev, Vring *vring,
+ int i)
+{
+ return virtio_tswap16(vdev, vring->vr.avail->ring[i]);
+}
+
+static inline void vring_set_used_ring_id(VirtIODevice *vdev, Vring *vring,
+ int i, uint32_t id)
+{
+ vring->vr.used->ring[i].id = virtio_tswap32(vdev, id);
+}
+
+static inline void vring_set_used_ring_len(VirtIODevice *vdev, Vring *vring,
+ int i, uint32_t len)
+{
+ vring->vr.used->ring[i].len = virtio_tswap32(vdev, len);
+}
+
+static inline uint16_t vring_get_used_flags(VirtIODevice *vdev, Vring *vring)
+{
+ return virtio_tswap16(vdev, vring->vr.used->flags);
+}
+
+static inline uint16_t vring_get_avail_flags(VirtIODevice *vdev, Vring *vring)
+{
+ return virtio_tswap16(vdev, vring->vr.avail->flags);
+}
+
+static inline void vring_set_used_flags(VirtIODevice *vdev, Vring *vring,
+ uint16_t flags)
+{
+ vring->vr.used->flags |= virtio_tswap16(vdev, flags);
+}
+
+static inline void vring_clear_used_flags(VirtIODevice *vdev, Vring *vring,
+ uint16_t flags)
+{
+ vring->vr.used->flags &= virtio_tswap16(vdev, ~flags);
+}
+
+static inline unsigned int vring_get_num(Vring *vring)
+{
+ return vring->vr.num;
+}
+
+/* Are there more descriptors available? */
+static inline bool vring_more_avail(VirtIODevice *vdev, Vring *vring)
+{
+ return vring_get_avail_idx(vdev, vring) != vring->last_avail_idx;
+}
+
+#endif
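These accessors centralize the virtio_tswap*() byte swapping that the dataplane code previously did by hand on vring fields. A simplified sketch of publishing one completed element onto the used ring; the real vring_push() also takes care of memory barriers and guest notification:

    #include "hw/virtio/dataplane/vring-accessors.h"

    static void publish_used(VirtIODevice *vdev, Vring *vring,
                             unsigned int head, uint32_t len)
    {
        uint16_t idx = vring_get_used_idx(vdev, vring);
        unsigned int pos = idx % vring_get_num(vring);

        vring_set_used_ring_id(vdev, vring, pos, head);
        vring_set_used_ring_len(vdev, vring, pos, len);
        vring_set_used_idx(vdev, vring, idx + 1);
    }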
diff --git a/include/hw/virtio/dataplane/vring.h b/include/hw/virtio/dataplane/vring.h
index d3e086aefc..e42c0fc9cc 100644
--- a/include/hw/virtio/dataplane/vring.h
+++ b/include/hw/virtio/dataplane/vring.h
@@ -31,17 +31,6 @@ typedef struct {
bool broken; /* was there a fatal error? */
} Vring;
-static inline unsigned int vring_get_num(Vring *vring)
-{
- return vring->vr.num;
-}
-
-/* Are there more descriptors available? */
-static inline bool vring_more_avail(Vring *vring)
-{
- return vring->vr.avail->idx != vring->last_avail_idx;
-}
-
/* Fail future vring_pop() and vring_push() calls until reset */
static inline void vring_set_broken(Vring *vring)
{
@@ -54,6 +43,7 @@ void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem);
-void vring_push(Vring *vring, VirtQueueElement *elem, int len);
+void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
+ int len);
#endif /* VRING_H */
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 4652b70b5d..fc7d311a43 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -113,6 +113,7 @@ struct VirtIOBlkConf
uint32_t scsi;
uint32_t config_wce;
uint32_t data_plane;
+ uint32_t request_merging;
};
struct VirtIOBlockDataPlane;
@@ -134,27 +135,32 @@ typedef struct VirtIOBlock {
struct VirtIOBlockDataPlane *dataplane;
} VirtIOBlock;
-typedef struct MultiReqBuffer {
- BlockRequest blkreq[32];
- unsigned int num_writes;
-} MultiReqBuffer;
-
typedef struct VirtIOBlockReq {
+ int64_t sector_num;
VirtIOBlock *dev;
VirtQueueElement elem;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr out;
QEMUIOVector qiov;
struct VirtIOBlockReq *next;
+ struct VirtIOBlockReq *mr_next;
BlockAcctCookie acct;
} VirtIOBlockReq;
+#define VIRTIO_BLK_MAX_MERGE_REQS 32
+
+typedef struct MultiReqBuffer {
+ VirtIOBlockReq *reqs[VIRTIO_BLK_MAX_MERGE_REQS];
+ unsigned int num_reqs;
+ bool is_write;
+} MultiReqBuffer;
+
VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s);
void virtio_blk_free_request(VirtIOBlockReq *req);
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);
-void virtio_submit_multiwrite(BlockBackend *blk, MultiReqBuffer *mrb);
+void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb);
#endif
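MultiReqBuffer now collects whole VirtIOBlockReq pointers and records the direction, so reads can be merged as well as writes. A hedged sketch of how a caller might batch requests; queue_or_flush() is a hypothetical helper illustrating the structure, not the actual virtio-blk code:

    #include "hw/virtio/virtio-blk.h"

    static void queue_or_flush(BlockBackend *blk, MultiReqBuffer *mrb,
                               VirtIOBlockReq *req, bool is_write)
    {
        /* Flush the batch when it is full or when the direction changes;
         * submitting is assumed to reset num_reqs. */
        if (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
            (mrb->num_reqs > 0 && mrb->is_write != is_write)) {
            virtio_blk_submit_multireq(blk, mrb);
        }
        mrb->is_write = is_write;
        mrb->reqs[mrb->num_reqs++] = req;
    }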
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 0b26bc68dd..c20f2d1fee 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -139,9 +139,7 @@ struct VMStateDescription {
const VMStateSubsection *subsections;
};
-#ifdef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_dummy;
-#endif
extern const VMStateInfo vmstate_info_bool;
diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h
index eeaf0cb981..986260f466 100644
--- a/include/qapi/qmp/qerror.h
+++ b/include/qapi/qmp/qerror.h
@@ -37,9 +37,6 @@ void qerror_report_err(Error *err);
#define QERR_BASE_NOT_FOUND \
ERROR_CLASS_GENERIC_ERROR, "Base '%s' not found"
-#define QERR_BLOCK_JOB_NOT_ACTIVE \
- ERROR_CLASS_DEVICE_NOT_ACTIVE, "No active block job on device '%s'"
-
#define QERR_BLOCK_JOB_NOT_READY \
ERROR_CLASS_GENERIC_ERROR, "The active block job for device '%s' cannot be completed"
@@ -52,9 +49,6 @@ void qerror_report_err(Error *err);
#define QERR_BUS_NOT_FOUND \
ERROR_CLASS_GENERIC_ERROR, "Bus '%s' not found"
-#define QERR_DEVICE_ENCRYPTED \
- ERROR_CLASS_DEVICE_ENCRYPTED, "'%s' (%s) is encrypted"
-
#define QERR_DEVICE_HAS_NO_MEDIUM \
ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no medium"
@@ -70,9 +64,6 @@ void qerror_report_err(Error *err);
#define QERR_DEVICE_NO_HOTPLUG \
ERROR_CLASS_GENERIC_ERROR, "Device '%s' does not support hotplugging"
-#define QERR_DEVICE_NOT_ENCRYPTED \
- ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not encrypted"
-
#define QERR_DEVICE_NOT_FOUND \
ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found"
diff --git a/include/qemu-io.h b/include/qemu-io.h
index 5d6006f73b..4d402b9b01 100644
--- a/include/qemu-io.h
+++ b/include/qemu-io.h
@@ -22,7 +22,7 @@
#define CMD_FLAG_GLOBAL ((int)0x80000000) /* don't iterate "args" */
-typedef int (*cfunc_t)(BlockDriverState *bs, int argc, char **argv);
+typedef int (*cfunc_t)(BlockBackend *blk, int argc, char **argv);
typedef void (*helpfunc_t)(void);
typedef struct cmdinfo {
@@ -40,7 +40,7 @@ typedef struct cmdinfo {
extern bool qemuio_misalign;
-bool qemuio_command(BlockDriverState *bs, const char *cmd);
+bool qemuio_command(BlockBackend *blk, const char *cmd);
void qemuio_add_command(const cmdinfo_t *ci);
int qemuio_command_usage(const cmdinfo_t *ci);
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index 8871a021d0..2e8ebb2535 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -62,6 +62,9 @@ typedef struct BlockDevOps {
BlockBackend *blk_new(const char *name, Error **errp);
BlockBackend *blk_new_with_bs(const char *name, Error **errp);
+BlockBackend *blk_new_open(const char *name, const char *filename,
+ const char *reference, QDict *options, int flags,
+ Error **errp);
void blk_ref(BlockBackend *blk);
void blk_unref(BlockBackend *blk);
const char *blk_name(BlockBackend *blk);
@@ -91,6 +94,7 @@ int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count);
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count);
int64_t blk_getlength(BlockBackend *blk);
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
+int64_t blk_nb_sectors(BlockBackend *blk);
BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque);
@@ -127,6 +131,7 @@ int blk_is_inserted(BlockBackend *blk);
void blk_lock_medium(BlockBackend *blk, bool locked);
void blk_eject(BlockBackend *blk, bool eject_flag);
int blk_get_flags(BlockBackend *blk);
+int blk_get_max_transfer_length(BlockBackend *blk);
void blk_set_guest_block_size(BlockBackend *blk, int align);
void *blk_blockalign(BlockBackend *blk, size_t size);
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
@@ -150,5 +155,14 @@ BlockAcctStats *blk_get_stats(BlockBackend *blk);
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque);
+int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
+ int nb_sectors, BdrvRequestFlags flags);
+int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
+ const uint8_t *buf, int nb_sectors);
+int blk_truncate(BlockBackend *blk, int64_t offset);
+int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors);
+int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
+ int64_t pos, int size);
+int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
#endif
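blk_new_open() folds the previous blk_new_with_bs() plus bdrv_open() sequence into a single call, as the qemu-img changes later in this patch illustrate. A small sketch, assuming the caller wants to force a format via a QDict; open_image() is an illustrative name:

    #include "sysemu/block-backend.h"
    #include "qapi/qmp/qdict.h"
    #include "qapi/qmp/qstring.h"

    static BlockBackend *open_image(const char *filename, const char *fmt,
                                    int flags, Error **errp)
    {
        QDict *options = NULL;

        if (fmt) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(fmt));
        }
        return blk_new_open("image", filename, NULL, options, flags, errp);
    }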
diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h
index 899f05c138..359e14304f 100644
--- a/include/sysemu/device_tree.h
+++ b/include/sysemu/device_tree.h
@@ -110,4 +110,13 @@ int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
qdt_tmp); \
})
+#define FDT_PCI_RANGE_RELOCATABLE 0x80000000
+#define FDT_PCI_RANGE_PREFETCHABLE 0x40000000
+#define FDT_PCI_RANGE_ALIASED 0x20000000
+#define FDT_PCI_RANGE_TYPE_MASK 0x03000000
+#define FDT_PCI_RANGE_MMIO_64BIT 0x03000000
+#define FDT_PCI_RANGE_MMIO 0x02000000
+#define FDT_PCI_RANGE_IOPORT 0x01000000
+#define FDT_PCI_RANGE_CONFIG 0x00000000
+
#endif /* __DEVICE_TREE_H__ */
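These constants describe the high address cell used in device-tree PCI "ranges" entries. A small sketch of composing that cell for a window; the helper name and the choice to leave the bus/device/function bits at zero are illustrative assumptions:

    #include <stdbool.h>
    #include <stdint.h>
    #include "sysemu/device_tree.h"

    static uint32_t pci_range_phys_hi(bool is_io, bool prefetchable)
    {
        uint32_t hi = is_io ? FDT_PCI_RANGE_IOPORT : FDT_PCI_RANGE_MMIO;

        if (prefetchable) {
            hi |= FDT_PCI_RANGE_PREFETCHABLE;
        }
        return hi;
    }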
diff --git a/include/ui/console.h b/include/ui/console.h
index 8a4d671fbc..5c19c3c098 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -335,6 +335,7 @@ void vnc_display_init(const char *id);
void vnc_display_open(const char *id, Error **errp);
void vnc_display_add_client(const char *id, int csock, bool skipauth);
char *vnc_display_local_addr(const char *id);
+void vnc_auto_assign_id(QemuOptsList *olist, QemuOpts *opts);
#ifdef CONFIG_VNC
int vnc_display_password(const char *id, const char *password);
int vnc_display_pw_expire(const char *id, time_t expires);
diff --git a/kvm-all.c b/kvm-all.c
index 2f21a4e6fe..05a79c20e0 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -2070,10 +2070,6 @@ int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
}
bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
- if (!bp) {
- return -ENOMEM;
- }
-
bp->pc = addr;
bp->use_count = 1;
err = kvm_arch_insert_sw_breakpoint(cpu, bp);
diff --git a/libcacard/Makefile b/libcacard/Makefile
index 0e7903fcfb..b5eddff11e 100644
--- a/libcacard/Makefile
+++ b/libcacard/Makefile
@@ -19,6 +19,8 @@ vscclient$(EXESUF): libcacard/vscclient.o libcacard.la
libcacard.la: LDFLAGS += -rpath $(libdir) -no-undefined \
-export-symbols $(SRC_PATH)/libcacard/libcacard.syms
+# Prevent libcacard.so linking against the entire world of 3rd party libs
+libcacard.la: LIBS =
libcacard.la: $(libcacard-lobj-y)
$(call LINK,$^)
diff --git a/linux-user/main.c b/linux-user/main.c
index cfa7d07b7a..d92702a734 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -315,7 +315,7 @@ void cpu_loop(CPUX86State *env)
#endif
case EXCP0B_NOSEG:
case EXCP0C_STACK:
- info.si_signo = SIGBUS;
+ info.si_signo = TARGET_SIGBUS;
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0;
@@ -329,7 +329,7 @@ void cpu_loop(CPUX86State *env)
} else
#endif
{
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0;
@@ -337,7 +337,7 @@ void cpu_loop(CPUX86State *env)
}
break;
case EXCP0E_PAGE:
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
if (!(env->error_code & 1))
info.si_code = TARGET_SEGV_MAPERR;
@@ -354,7 +354,7 @@ void cpu_loop(CPUX86State *env)
#endif
{
/* division by zero */
- info.si_signo = SIGFPE;
+ info.si_signo = TARGET_SIGFPE;
info.si_errno = 0;
info.si_code = TARGET_FPE_INTDIV;
info._sifields._sigfault._addr = env->eip;
@@ -369,7 +369,7 @@ void cpu_loop(CPUX86State *env)
} else
#endif
{
- info.si_signo = SIGTRAP;
+ info.si_signo = TARGET_SIGTRAP;
info.si_errno = 0;
if (trapnr == EXCP01_DB) {
info.si_code = TARGET_TRAP_BRKPT;
@@ -389,7 +389,7 @@ void cpu_loop(CPUX86State *env)
} else
#endif
{
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0;
@@ -397,7 +397,7 @@ void cpu_loop(CPUX86State *env)
}
break;
case EXCP06_ILLOP:
- info.si_signo = SIGILL;
+ info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->eip;
@@ -519,7 +519,7 @@ segv:
end_exclusive();
/* We get the PC of the entry address - which is as good as anything,
on a real kernel what you get depends on which mode it uses. */
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
@@ -696,7 +696,7 @@ void cpu_loop(CPUARMState *env)
rc = EmulateAll(opcode, &ts->fpa, env);
if (rc == 0) { /* illegal instruction */
- info.si_signo = SIGILL;
+ info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->regs[15];
@@ -720,7 +720,7 @@ void cpu_loop(CPUARMState *env)
//printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
- info.si_signo = SIGFPE;
+ info.si_signo = TARGET_SIGFPE;
info.si_errno = 0;
/* ordered by priority, least first */
@@ -844,7 +844,7 @@ void cpu_loop(CPUARMState *env)
case EXCP_DATA_ABORT:
addr = env->exception.vaddress;
{
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
@@ -1030,7 +1030,7 @@ void cpu_loop(CPUARMState *env)
/* just indicate that signals should be handled asap */
break;
case EXCP_UDEF:
- info.si_signo = SIGILL;
+ info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->pc;
@@ -1043,7 +1043,7 @@ void cpu_loop(CPUARMState *env)
/* fall through for segv */
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
@@ -1123,7 +1123,7 @@ void cpu_loop(CPUUniCore32State *env)
break;
case UC32_EXCP_DTRAP:
case UC32_EXCP_ITRAP:
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
@@ -2672,7 +2672,7 @@ void cpu_loop(CPUOpenRISCState *env)
break;
case EXCP_BUSERR:
qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
- gdbsig = SIGBUS;
+ gdbsig = TARGET_SIGBUS;
break;
case EXCP_DPF:
case EXCP_IPF:
@@ -2684,11 +2684,11 @@ void cpu_loop(CPUOpenRISCState *env)
break;
case EXCP_ALIGN:
qemu_log("\nAlignment pc is %#x\n", env->pc);
- gdbsig = SIGBUS;
+ gdbsig = TARGET_SIGBUS;
break;
case EXCP_ILLEGAL:
qemu_log("\nIllegal instructionpc is %#x\n", env->pc);
- gdbsig = SIGILL;
+ gdbsig = TARGET_SIGILL;
break;
case EXCP_INT:
qemu_log("\nExternal interruptpc is %#x\n", env->pc);
@@ -2699,7 +2699,7 @@ void cpu_loop(CPUOpenRISCState *env)
break;
case EXCP_RANGE:
qemu_log("\nRange\n");
- gdbsig = SIGSEGV;
+ gdbsig = TARGET_SIGSEGV;
break;
case EXCP_SYSCALL:
env->pc += 4; /* 0xc00; */
@@ -2717,7 +2717,7 @@ void cpu_loop(CPUOpenRISCState *env)
break;
case EXCP_TRAP:
qemu_log("\nTrap\n");
- gdbsig = SIGTRAP;
+ gdbsig = TARGET_SIGTRAP;
break;
case EXCP_NR:
qemu_log("\nNR\n");
@@ -2787,7 +2787,7 @@ void cpu_loop(CPUSH4State *env)
break;
case 0xa0:
case 0xc0:
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->tea;
@@ -2818,7 +2818,7 @@ void cpu_loop(CPUCRISState *env)
switch (trapnr) {
case 0xaa:
{
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
@@ -2879,7 +2879,7 @@ void cpu_loop(CPUMBState *env)
switch (trapnr) {
case 0xaa:
{
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
@@ -2917,14 +2917,14 @@ void cpu_loop(CPUMBState *env)
switch (env->sregs[SR_ESR] & 31) {
case ESR_EC_DIVZERO:
- info.si_signo = SIGFPE;
+ info.si_signo = TARGET_SIGFPE;
info.si_errno = 0;
info.si_code = TARGET_FPE_FLTDIV;
info._sifields._sigfault._addr = 0;
queue_signal(env, info.si_signo, &info);
break;
case ESR_EC_FPU:
- info.si_signo = SIGFPE;
+ info.si_signo = TARGET_SIGFPE;
info.si_errno = 0;
if (env->sregs[SR_FSR] & FSR_IO) {
info.si_code = TARGET_FPE_FLTINV;
@@ -3003,7 +3003,7 @@ void cpu_loop(CPUM68KState *env)
case EXCP_LINEF:
case EXCP_UNSUPPORTED:
do_sigill:
- info.si_signo = SIGILL;
+ info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->pc;
@@ -3030,7 +3030,7 @@ void cpu_loop(CPUM68KState *env)
break;
case EXCP_ACCESS:
{
- info.si_signo = SIGSEGV;
+ info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
@@ -3337,12 +3337,12 @@ void cpu_loop(CPUS390XState *env)
switch (n) {
case PGM_OPERATION:
case PGM_PRIVILEGED:
- sig = SIGILL;
+ sig = TARGET_SIGILL;
n = TARGET_ILL_ILLOPC;
goto do_signal_pc;
case PGM_PROTECTION:
case PGM_ADDRESSING:
- sig = SIGSEGV;
+ sig = TARGET_SIGSEGV;
/* XXX: check env->error_code */
n = TARGET_SEGV_MAPERR;
addr = env->__excp_addr;
@@ -3352,16 +3352,16 @@ void cpu_loop(CPUS390XState *env)
case PGM_SPECIAL_OP:
case PGM_OPERAND:
do_sigill_opn:
- sig = SIGILL;
+ sig = TARGET_SIGILL;
n = TARGET_ILL_ILLOPN;
goto do_signal_pc;
case PGM_FIXPT_OVERFLOW:
- sig = SIGFPE;
+ sig = TARGET_SIGFPE;
n = TARGET_FPE_INTOVF;
goto do_signal_pc;
case PGM_FIXPT_DIVIDE:
- sig = SIGFPE;
+ sig = TARGET_SIGFPE;
n = TARGET_FPE_INTDIV;
goto do_signal_pc;
@@ -3386,7 +3386,7 @@ void cpu_loop(CPUS390XState *env)
/* ??? Quantum exception; BFP, DFP error. */
goto do_sigill_opn;
}
- sig = SIGFPE;
+ sig = TARGET_SIGFPE;
goto do_signal_pc;
}
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index d4398b9c56..5720195654 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -1883,6 +1883,11 @@ static struct iovec *lock_iovec(int type, abi_ulong target_addr,
return vec;
fail:
+ while (--i >= 0) {
+ if (tswapal(target_vec[i].iov_len) > 0) {
+ unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
+ }
+ }
unlock_user(target_vec, target_addr, 0);
fail2:
free(vec);
@@ -1901,7 +1906,7 @@ static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
if (target_vec) {
for (i = 0; i < count; i++) {
abi_ulong base = tswapal(target_vec[i].iov_base);
- abi_long len = tswapal(target_vec[i].iov_base);
+ abi_long len = tswapal(target_vec[i].iov_len);
if (len < 0) {
break;
}
@@ -3571,6 +3576,7 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
}
default:
ret = -TARGET_EINVAL;
+ unlock_user(argptr, guest_data, 0);
goto out;
}
unlock_user(argptr, guest_data, 0);
@@ -3690,6 +3696,7 @@ static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
break;
}
default:
+ unlock_user(argptr, guest_data, 0);
ret = -TARGET_EINVAL;
goto out;
}
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index ebb3be1196..edd5f3c80b 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -655,7 +655,14 @@ typedef struct {
#endif
#define TARGET_SI_MAX_SIZE 128
-#define TARGET_SI_PAD_SIZE ((TARGET_SI_MAX_SIZE/sizeof(int)) - 3)
+
+#if TARGET_ABI_BITS == 32
+#define TARGET_SI_PREAMBLE_SIZE (3 * sizeof(int))
+#else
+#define TARGET_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#endif
+
+#define TARGET_SI_PAD_SIZE ((TARGET_SI_MAX_SIZE - TARGET_SI_PREAMBLE_SIZE) / sizeof(int))
typedef struct target_siginfo {
#ifdef TARGET_MIPS
@@ -1600,73 +1607,25 @@ struct target_stat {
#elif defined(TARGET_ABI_MIPSN32)
struct target_stat {
- unsigned st_dev;
- int st_pad1[3]; /* Reserved for network id */
- unsigned int st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- int st_uid;
- int st_gid;
- unsigned st_rdev;
- unsigned int st_pad2[2];
- unsigned int st_size;
- unsigned int st_pad3;
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- unsigned int target_st_atime;
- unsigned int target_st_atime_nsec;
- unsigned int target_st_mtime;
- unsigned int target_st_mtime_nsec;
- unsigned int target_st_ctime;
- unsigned int target_st_ctime_nsec;
- unsigned int st_blksize;
- unsigned int st_blocks;
- unsigned int st_pad4[14];
-};
-
-/*
- * This matches struct stat64 in glibc2.1, hence the absolutely insane
- * amounts of padding around dev_t's. The memory layout is the same as of
- * struct stat of the 64-bit kernel.
- */
-
-#define TARGET_HAS_STRUCT_STAT64
-struct target_stat64 {
- unsigned int st_dev;
- unsigned int st_pad0[3]; /* Reserved for st_dev expansion */
-
- target_ulong st_ino;
-
- unsigned int st_mode;
- unsigned int st_nlink;
-
- int st_uid;
- int st_gid;
-
- unsigned int st_rdev;
- unsigned int st_pad1[3]; /* Reserved for st_rdev expansion */
-
- int st_size;
-
- /*
- * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
- * but we don't have it under Linux.
- */
- int target_st_atime;
- unsigned int target_st_atime_nsec; /* Reserved for st_atime expansion */
-
- int target_st_mtime;
- unsigned int target_st_mtime_nsec; /* Reserved for st_mtime expansion */
-
- int target_st_ctime;
- unsigned int target_st_ctime_nsec; /* Reserved for st_ctime expansion */
-
- unsigned int st_blksize;
- unsigned int st_pad2;
-
- int st_blocks;
+ abi_ulong st_dev;
+ abi_ulong st_pad0[3]; /* Reserved for st_dev expansion */
+ uint64_t st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ int st_uid;
+ int st_gid;
+ abi_ulong st_rdev;
+ abi_ulong st_pad1[3]; /* Reserved for st_rdev expansion */
+ int64_t st_size;
+ abi_long target_st_atime;
+ abi_ulong target_st_atime_nsec; /* Reserved for st_atime expansion */
+ abi_long target_st_mtime;
+ abi_ulong target_st_mtime_nsec; /* Reserved for st_mtime expansion */
+ abi_long target_st_ctime;
+ abi_ulong target_st_ctime_nsec; /* Reserved for st_ctime expansion */
+ abi_ulong st_blksize;
+ abi_ulong st_pad2;
+ int64_t st_blocks;
};
#elif defined(TARGET_ABI_MIPSO32)
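The old pad formula hard-coded a three-int preamble, which is only right for 32-bit ABIs. Assuming a 4-byte int on all targets, the numbers work out as follows:

    32-bit ABI: (128 - 3 * 4) / 4 = 29 padding ints, same as the old 128 / 4 - 3
    64-bit ABI: (128 - 4 * 4) / 4 = 28 padding ints, where the old formula gave 29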
diff --git a/migration/rdma.c b/migration/rdma.c
index fc351eabf2..6bee30c6c1 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -121,7 +121,7 @@ enum {
RDMA_WRID_RECV_CONTROL = 4000,
};
-const char *wrid_desc[] = {
+static const char *wrid_desc[] = {
[RDMA_WRID_NONE] = "NONE",
[RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
[RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
@@ -160,7 +160,7 @@ enum {
RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
};
-const char *control_desc[] = {
+static const char *control_desc[] = {
[RDMA_CONTROL_NONE] = "NONE",
[RDMA_CONTROL_ERROR] = "ERROR",
[RDMA_CONTROL_READY] = "READY",
@@ -1121,9 +1121,6 @@ static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
/* allocate memory to store chunk MRs */
if (!block->pmr) {
block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
- if (!block->pmr) {
- return -1;
- }
}
/*
@@ -3253,14 +3250,14 @@ static int qemu_rdma_get_fd(void *opaque)
return rdma->comp_channel->fd;
}
-const QEMUFileOps rdma_read_ops = {
+static const QEMUFileOps rdma_read_ops = {
.get_buffer = qemu_rdma_get_buffer,
.get_fd = qemu_rdma_get_fd,
.close = qemu_rdma_close,
.hook_ram_load = qemu_rdma_registration_handle,
};
-const QEMUFileOps rdma_write_ops = {
+static const QEMUFileOps rdma_write_ops = {
.put_buffer = qemu_rdma_put_buffer,
.close = qemu_rdma_close,
.before_ram_iterate = qemu_rdma_registration_start,
diff --git a/monitor.c b/monitor.c
index 5a24311844..c3cc060b45 100644
--- a/monitor.c
+++ b/monitor.c
@@ -5368,9 +5368,12 @@ static void bdrv_password_cb(void *opaque, const char *password,
Monitor *mon = opaque;
BlockDriverState *bs = readline_opaque;
int ret = 0;
+ Error *local_err = NULL;
- if (bdrv_set_key(bs, password) != 0) {
- monitor_printf(mon, "invalid password\n");
+ bdrv_add_key(bs, password, &local_err);
+ if (local_err) {
+ monitor_printf(mon, "%s\n", error_get_pretty(local_err));
+ error_free(local_err);
ret = -EPERM;
}
if (mon->password_completion_cb)
@@ -5388,17 +5391,20 @@ int monitor_read_bdrv_key_start(Monitor *mon, BlockDriverState *bs,
BlockCompletionFunc *completion_cb,
void *opaque)
{
+ Error *local_err = NULL;
int err;
- if (!bdrv_key_required(bs)) {
+ bdrv_add_key(bs, NULL, &local_err);
+ if (!local_err) {
if (completion_cb)
completion_cb(opaque, 0);
return 0;
}
+ /* Need a key for @bs */
+
if (monitor_ctrl_mode(mon)) {
- qerror_report(QERR_DEVICE_ENCRYPTED, bdrv_get_device_name(bs),
- bdrv_get_encrypted_filename(bs));
+ qerror_report_err(local_err);
return -1;
}
diff --git a/nbd.c b/nbd.c
index 53cf82be0b..71159aff6a 100644
--- a/nbd.c
+++ b/nbd.c
@@ -494,7 +494,7 @@ fail:
}
int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
- off_t *size, size_t *blocksize)
+ off_t *size, size_t *blocksize, Error **errp)
{
char buf[256];
uint64_t magic, s;
@@ -506,13 +506,13 @@ int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
rc = -EINVAL;
if (read_sync(csock, buf, 8) != 8) {
- LOG("read failed");
+ error_setg(errp, "Failed to read data");
goto fail;
}
buf[8] = '\0';
if (strlen(buf) == 0) {
- LOG("server connection closed");
+ error_setg(errp, "Server connection closed unexpectedly");
goto fail;
}
@@ -527,12 +527,12 @@ int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
qemu_isprint(buf[7]) ? buf[7] : '.');
if (memcmp(buf, "NBDMAGIC", 8) != 0) {
- LOG("Invalid magic received");
+ error_setg(errp, "Invalid magic received");
goto fail;
}
if (read_sync(csock, &magic, sizeof(magic)) != sizeof(magic)) {
- LOG("read failed");
+ error_setg(errp, "Failed to read magic");
goto fail;
}
magic = be64_to_cpu(magic);
@@ -545,52 +545,60 @@ int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
TRACE("Checking magic (opts_magic)");
if (magic != NBD_OPTS_MAGIC) {
- LOG("Bad magic received");
+ if (magic == NBD_CLIENT_MAGIC) {
+ error_setg(errp, "Server does not support export names");
+ } else {
+ error_setg(errp, "Bad magic received");
+ }
goto fail;
}
if (read_sync(csock, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- LOG("flags read failed");
+ error_setg(errp, "Failed to read server flags");
goto fail;
}
*flags = be16_to_cpu(tmp) << 16;
/* reserved for future use */
if (write_sync(csock, &reserved, sizeof(reserved)) !=
sizeof(reserved)) {
- LOG("write failed (reserved)");
+ error_setg(errp, "Failed to read reserved field");
goto fail;
}
/* write the export name */
magic = cpu_to_be64(magic);
if (write_sync(csock, &magic, sizeof(magic)) != sizeof(magic)) {
- LOG("write failed (magic)");
+ error_setg(errp, "Failed to send export name magic");
goto fail;
}
opt = cpu_to_be32(NBD_OPT_EXPORT_NAME);
if (write_sync(csock, &opt, sizeof(opt)) != sizeof(opt)) {
- LOG("write failed (opt)");
+ error_setg(errp, "Failed to send export name option number");
goto fail;
}
namesize = cpu_to_be32(strlen(name));
if (write_sync(csock, &namesize, sizeof(namesize)) !=
sizeof(namesize)) {
- LOG("write failed (namesize)");
+ error_setg(errp, "Failed to send export name length");
goto fail;
}
if (write_sync(csock, (char*)name, strlen(name)) != strlen(name)) {
- LOG("write failed (name)");
+ error_setg(errp, "Failed to send export name");
goto fail;
}
} else {
TRACE("Checking magic (cli_magic)");
if (magic != NBD_CLIENT_MAGIC) {
- LOG("Bad magic received");
+ if (magic == NBD_OPTS_MAGIC) {
+ error_setg(errp, "Server requires an export name");
+ } else {
+ error_setg(errp, "Bad magic received");
+ }
goto fail;
}
}
if (read_sync(csock, &s, sizeof(s)) != sizeof(s)) {
- LOG("read failed");
+ error_setg(errp, "Failed to read export length");
goto fail;
}
*size = be64_to_cpu(s);
@@ -599,19 +607,19 @@ int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
if (!name) {
if (read_sync(csock, flags, sizeof(*flags)) != sizeof(*flags)) {
- LOG("read failed (flags)");
+ error_setg(errp, "Failed to read export flags");
goto fail;
}
*flags = be32_to_cpup(flags);
} else {
if (read_sync(csock, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- LOG("read failed (tmp)");
+ error_setg(errp, "Failed to read export flags");
goto fail;
}
*flags |= be32_to_cpu(tmp);
}
if (read_sync(csock, &buf, 124) != 124) {
- LOG("read failed (buf)");
+ error_setg(errp, "Failed to read reserved block");
goto fail;
}
rc = 0;
@@ -866,7 +874,7 @@ void nbd_client_put(NBDClient *client)
{
if (--client->refcount == 0) {
/* The last reference should be dropped by client->close,
- * which is called by nbd_client_close.
+ * which is called by client_close.
*/
assert(client->closing);
@@ -881,7 +889,7 @@ void nbd_client_put(NBDClient *client)
}
}
-void nbd_client_close(NBDClient *client)
+static void client_close(NBDClient *client)
{
if (client->closing) {
return;
@@ -1018,7 +1026,7 @@ void nbd_export_close(NBDExport *exp)
nbd_export_get(exp);
QTAILQ_FOREACH_SAFE(client, &exp->clients, next, next) {
- nbd_client_close(client);
+ client_close(client);
}
nbd_export_set_name(exp, NULL);
nbd_export_put(exp);
@@ -1303,7 +1311,7 @@ done:
out:
nbd_request_put(req);
- nbd_client_close(client);
+ client_close(client);
}
static void nbd_read(void *opaque)
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 80984d1660..a3fdaf02ba 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -257,6 +257,9 @@
#
# @cache: the cache mode used for the block device (since: 2.3)
#
+# @write_threshold: configured write threshold for the device.
+# 0 if disabled. (Since 2.3)
+#
# Since: 0.14.0
#
##
@@ -271,7 +274,8 @@
'*bps_max': 'int', '*bps_rd_max': 'int',
'*bps_wr_max': 'int', '*iops_max': 'int',
'*iops_rd_max': 'int', '*iops_wr_max': 'int',
- '*iops_size': 'int', 'cache': 'BlockdevCacheInfo' } }
+ '*iops_size': 'int', 'cache': 'BlockdevCacheInfo',
+ 'write_threshold': 'int' } }
##
# @BlockDeviceIoStatus:
@@ -407,13 +411,20 @@
# growable sparse files (like qcow2) that are used on top
# of a physical device.
#
+# @rd_merged: Number of read requests that have been merged into another
+# request (Since 2.3).
+#
+# @wr_merged: Number of write requests that have been merged into another
+# request (Since 2.3).
+#
# Since: 0.14.0
##
{ 'type': 'BlockDeviceStats',
'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'rd_operations': 'int',
'wr_operations': 'int', 'flush_operations': 'int',
'flush_total_time_ns': 'int', 'wr_total_time_ns': 'int',
- 'rd_total_time_ns': 'int', 'wr_highest_offset': 'int' } }
+ 'rd_total_time_ns': 'int', 'wr_highest_offset': 'int',
+ 'rd_merged': 'int', 'wr_merged': 'int' } }
##
# @BlockStats:
@@ -1910,3 +1921,48 @@
##
{ 'enum': 'PreallocMode',
'data': [ 'off', 'metadata', 'falloc', 'full' ] }
+
+##
+# @BLOCK_WRITE_THRESHOLD
+#
+# Emitted when writes on a block device reach or exceed the
+# configured write threshold. For thin-provisioned devices, this
+# means the device should be extended to avoid pausing due to
+# disk exhaustion.
+# The event is one-shot. Once triggered, it needs to be
+# re-registered with another block-set-write-threshold command.
+#
+# @node-name: graph node name on which the threshold was exceeded.
+#
+# @amount-exceeded: amount of data which exceeded the threshold, in bytes.
+#
+# @write-threshold: last configured threshold, in bytes.
+#
+# Since: 2.3
+##
+{ 'event': 'BLOCK_WRITE_THRESHOLD',
+ 'data': { 'node-name': 'str',
+ 'amount-exceeded': 'uint64',
+ 'write-threshold': 'uint64' } }
+
+##
+# @block-set-write-threshold
+#
+# Change the write threshold for a block drive. An event will be delivered
+# if a write to this block drive crosses the configured threshold.
+# This is useful to transparently resize thin-provisioned drives without
+# the guest OS noticing.
+#
+# @node-name: graph node name on which the threshold must be set.
+#
+# @write-threshold: configured threshold for the block device, in bytes.
+# Use 0 to disable the threshold.
+#
+# Returns: Nothing on success
+# If @node-name is not found on the block device graph,
+# DeviceNotFound
+#
+# Since: 2.3
+##
+{ 'command': 'block-set-write-threshold',
+ 'data': { 'node-name': 'str', 'write-threshold': 'uint64' } }
diff --git a/qemu-img.c b/qemu-img.c
index 4e9a7f5741..0155e36283 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -35,7 +35,7 @@
#include "block/qapi.h"
#include <getopt.h>
-#define QEMU_IMG_VERSION "qemu-img version " QEMU_VERSION \
+#define QEMU_IMG_VERSION "qemu-img version " QEMU_VERSION QEMU_PKGVERSION \
", Copyright (c) 2004-2008 Fabrice Bellard\n"
typedef struct img_cmd_t {
@@ -261,6 +261,7 @@ static int print_block_option_help(const char *filename, const char *fmt)
{
BlockDriver *drv, *proto_drv;
QemuOptsList *create_opts = NULL;
+ Error *local_err = NULL;
/* Find driver and parse its options */
drv = bdrv_find_format(fmt);
@@ -271,9 +272,10 @@ static int print_block_option_help(const char *filename, const char *fmt)
create_opts = qemu_opts_append(create_opts, drv->create_opts);
if (filename) {
- proto_drv = bdrv_find_protocol(filename, true);
+ proto_drv = bdrv_find_protocol(filename, true, &local_err);
if (!proto_drv) {
- error_report("Unknown protocol '%s'", filename);
+ qerror_report_err(local_err);
+ error_free(local_err);
qemu_opts_free(create_opts);
return 1;
}
@@ -291,32 +293,24 @@ static BlockBackend *img_open(const char *id, const char *filename,
{
BlockBackend *blk;
BlockDriverState *bs;
- BlockDriver *drv;
char password[256];
Error *local_err = NULL;
- int ret;
-
- blk = blk_new_with_bs(id, &error_abort);
- bs = blk_bs(blk);
+ QDict *options = NULL;
if (fmt) {
- drv = bdrv_find_format(fmt);
- if (!drv) {
- error_report("Unknown file format '%s'", fmt);
- goto fail;
- }
- } else {
- drv = NULL;
+ options = qdict_new();
+ qdict_put(options, "driver", qstring_from_str(fmt));
}
- ret = bdrv_open(&bs, filename, NULL, NULL, flags, drv, &local_err);
- if (ret < 0) {
+ blk = blk_new_open(id, filename, NULL, options, flags, &local_err);
+ if (!blk) {
error_report("Could not open '%s': %s", filename,
error_get_pretty(local_err));
error_free(local_err);
goto fail;
}
+ bs = blk_bs(blk);
if (bdrv_is_encrypted(bs) && require_io) {
qprintf(quiet, "Disk image '%s' is encrypted.\n", filename);
if (read_password(password, sizeof(password)) < 0) {
@@ -1016,19 +1010,19 @@ static int64_t sectors_to_process(int64_t total, int64_t from)
* Returns 0 in case sectors are filled with 0, 1 if sectors contain non-zero
* data and negative value on error.
*
- * @param bs: Driver used for accessing file
+ * @param blk: BlockBackend for the image
* @param sect_num: Number of first sector to check
* @param sect_count: Number of sectors to check
* @param filename: Name of disk file we are checking (logging purpose)
* @param buffer: Allocated buffer for storing read data
* @param quiet: Flag for quiet mode
*/
-static int check_empty_sectors(BlockDriverState *bs, int64_t sect_num,
+static int check_empty_sectors(BlockBackend *blk, int64_t sect_num,
int sect_count, const char *filename,
uint8_t *buffer, bool quiet)
{
int pnum, ret = 0;
- ret = bdrv_read(bs, sect_num, buffer, sect_count);
+ ret = blk_read(blk, sect_num, buffer, sect_count);
if (ret < 0) {
error_report("Error while reading offset %" PRId64 " of %s: %s",
sectors_to_bytes(sect_num), filename, strerror(-ret));
@@ -1138,16 +1132,16 @@ static int img_compare(int argc, char **argv)
}
bs2 = blk_bs(blk2);
- buf1 = qemu_blockalign(bs1, IO_BUF_SIZE);
- buf2 = qemu_blockalign(bs2, IO_BUF_SIZE);
- total_sectors1 = bdrv_nb_sectors(bs1);
+ buf1 = blk_blockalign(blk1, IO_BUF_SIZE);
+ buf2 = blk_blockalign(blk2, IO_BUF_SIZE);
+ total_sectors1 = blk_nb_sectors(blk1);
if (total_sectors1 < 0) {
error_report("Can't get size of %s: %s",
filename1, strerror(-total_sectors1));
ret = 4;
goto out;
}
- total_sectors2 = bdrv_nb_sectors(bs2);
+ total_sectors2 = blk_nb_sectors(blk2);
if (total_sectors2 < 0) {
error_report("Can't get size of %s: %s",
filename2, strerror(-total_sectors2));
@@ -1189,7 +1183,7 @@ static int img_compare(int argc, char **argv)
if (allocated1 == allocated2) {
if (allocated1) {
- ret = bdrv_read(bs1, sector_num, buf1, nb_sectors);
+ ret = blk_read(blk1, sector_num, buf1, nb_sectors);
if (ret < 0) {
error_report("Error while reading offset %" PRId64 " of %s:"
" %s", sectors_to_bytes(sector_num), filename1,
@@ -1197,7 +1191,7 @@ static int img_compare(int argc, char **argv)
ret = 4;
goto out;
}
- ret = bdrv_read(bs2, sector_num, buf2, nb_sectors);
+ ret = blk_read(blk2, sector_num, buf2, nb_sectors);
if (ret < 0) {
error_report("Error while reading offset %" PRId64
" of %s: %s", sectors_to_bytes(sector_num),
@@ -1224,10 +1218,10 @@ static int img_compare(int argc, char **argv)
}
if (allocated1) {
- ret = check_empty_sectors(bs1, sector_num, nb_sectors,
+ ret = check_empty_sectors(blk1, sector_num, nb_sectors,
filename1, buf1, quiet);
} else {
- ret = check_empty_sectors(bs2, sector_num, nb_sectors,
+ ret = check_empty_sectors(blk2, sector_num, nb_sectors,
filename2, buf1, quiet);
}
if (ret) {
@@ -1244,18 +1238,18 @@ static int img_compare(int argc, char **argv)
}
if (total_sectors1 != total_sectors2) {
- BlockDriverState *bs_over;
+ BlockBackend *blk_over;
int64_t total_sectors_over;
const char *filename_over;
qprintf(quiet, "Warning: Image size mismatch!\n");
if (total_sectors1 > total_sectors2) {
total_sectors_over = total_sectors1;
- bs_over = bs1;
+ blk_over = blk1;
filename_over = filename1;
} else {
total_sectors_over = total_sectors2;
- bs_over = bs2;
+ blk_over = blk2;
filename_over = filename2;
}
@@ -1264,7 +1258,7 @@ static int img_compare(int argc, char **argv)
if (nb_sectors <= 0) {
break;
}
- ret = bdrv_is_allocated_above(bs_over, NULL, sector_num,
+ ret = bdrv_is_allocated_above(blk_bs(blk_over), NULL, sector_num,
nb_sectors, &pnum);
if (ret < 0) {
ret = 3;
@@ -1275,7 +1269,7 @@ static int img_compare(int argc, char **argv)
}
nb_sectors = pnum;
if (ret) {
- ret = check_empty_sectors(bs_over, sector_num, nb_sectors,
+ ret = check_empty_sectors(blk_over, sector_num, nb_sectors,
filename_over, buf1, quiet);
if (ret) {
if (ret < 0) {
@@ -1484,7 +1478,7 @@ static int img_convert(int argc, char **argv)
goto out;
}
bs[bs_i] = blk_bs(blk[bs_i]);
- bs_sectors[bs_i] = bdrv_nb_sectors(bs[bs_i]);
+ bs_sectors[bs_i] = blk_nb_sectors(blk[bs_i]);
if (bs_sectors[bs_i] < 0) {
error_report("Could not get size of %s: %s",
argv[optind + bs_i], strerror(-bs_sectors[bs_i]));
@@ -1524,41 +1518,44 @@ static int img_convert(int argc, char **argv)
goto out;
}
- proto_drv = bdrv_find_protocol(out_filename, true);
+ proto_drv = bdrv_find_protocol(out_filename, true, &local_err);
if (!proto_drv) {
- error_report("Unknown protocol '%s'", out_filename);
+ qerror_report_err(local_err);
+ error_free(local_err);
ret = -1;
goto out;
}
- if (!drv->create_opts) {
- error_report("Format driver '%s' does not support image creation",
- drv->format_name);
- ret = -1;
- goto out;
- }
+ if (!skip_create) {
+ if (!drv->create_opts) {
+ error_report("Format driver '%s' does not support image creation",
+ drv->format_name);
+ ret = -1;
+ goto out;
+ }
- if (!proto_drv->create_opts) {
- error_report("Protocol driver '%s' does not support image creation",
- proto_drv->format_name);
- ret = -1;
- goto out;
- }
+ if (!proto_drv->create_opts) {
+ error_report("Protocol driver '%s' does not support image creation",
+ proto_drv->format_name);
+ ret = -1;
+ goto out;
+ }
- create_opts = qemu_opts_append(create_opts, drv->create_opts);
- create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
+ create_opts = qemu_opts_append(create_opts, drv->create_opts);
+ create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
- opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
- if (options && qemu_opts_do_parse(opts, options, NULL)) {
- error_report("Invalid options for file format '%s'", out_fmt);
- ret = -1;
- goto out;
- }
+ opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
+ if (options && qemu_opts_do_parse(opts, options, NULL)) {
+ error_report("Invalid options for file format '%s'", out_fmt);
+ ret = -1;
+ goto out;
+ }
- qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_sectors * 512);
- ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL);
- if (ret < 0) {
- goto out;
+ qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_sectors * 512);
+ ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL);
+ if (ret < 0) {
+ goto out;
+ }
}
/* Get backing file name if -o backing_file was used */
@@ -1633,10 +1630,10 @@ static int img_convert(int argc, char **argv)
out_bs->bl.discard_alignment))
);
- buf = qemu_blockalign(out_bs, bufsectors * BDRV_SECTOR_SIZE);
+ buf = blk_blockalign(out_blk, bufsectors * BDRV_SECTOR_SIZE);
if (skip_create) {
- int64_t output_sectors = bdrv_nb_sectors(out_bs);
+ int64_t output_sectors = blk_nb_sectors(out_blk);
if (output_sectors < 0) {
error_report("unable to get output image length: %s\n",
strerror(-output_sectors));
@@ -1704,7 +1701,7 @@ static int img_convert(int argc, char **argv)
nlow = remainder > bs_sectors[bs_i] - bs_num
? bs_sectors[bs_i] - bs_num : remainder;
- ret = bdrv_read(bs[bs_i], bs_num, buf2, nlow);
+ ret = blk_read(blk[bs_i], bs_num, buf2, nlow);
if (ret < 0) {
error_report("error while reading sector %" PRId64 ": %s",
bs_num, strerror(-ret));
@@ -1719,7 +1716,7 @@ static int img_convert(int argc, char **argv)
assert (remainder == 0);
if (!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)) {
- ret = bdrv_write_compressed(out_bs, sector_num, buf, n);
+ ret = blk_write_compressed(out_blk, sector_num, buf, n);
if (ret != 0) {
error_report("error while compressing sector %" PRId64
": %s", sector_num, strerror(-ret));
@@ -1730,7 +1727,7 @@ static int img_convert(int argc, char **argv)
qemu_progress_print(100.0 * sector_num / total_sectors, 0);
}
/* signal EOF to align */
- bdrv_write_compressed(out_bs, 0, NULL, 0);
+ blk_write_compressed(out_blk, 0, NULL, 0);
} else {
int64_t sectors_to_read, sectors_read, sector_num_next_status;
bool count_allocated_sectors;
@@ -1831,7 +1828,7 @@ restart:
}
n1 = n;
- ret = bdrv_read(bs[bs_i], sector_num - bs_offset, buf, n);
+ ret = blk_read(blk[bs_i], sector_num - bs_offset, buf, n);
if (ret < 0) {
error_report("error while reading sector %" PRId64 ": %s",
sector_num - bs_offset, strerror(-ret));
@@ -1844,7 +1841,7 @@ restart:
while (n > 0) {
if (!has_zero_init ||
is_allocated_sectors_min(buf1, n, &n1, min_sparse)) {
- ret = bdrv_write(out_bs, sector_num, buf1, n1);
+ ret = blk_write(out_blk, sector_num, buf1, n1);
if (ret < 0) {
error_report("error while writing sector %" PRId64
": %s", sector_num, strerror(-ret));
@@ -2268,7 +2265,7 @@ static int img_map(int argc, char **argv)
printf("%-16s%-16s%-16s%s\n", "Offset", "Length", "Mapped to", "File");
}
- length = bdrv_getlength(bs);
+ length = blk_getlength(blk);
while (curr.start + curr.length < length) {
int64_t nsectors_left;
int64_t sector_num;
@@ -2437,8 +2434,7 @@ static int img_snapshot(int argc, char **argv)
static int img_rebase(int argc, char **argv)
{
BlockBackend *blk = NULL, *blk_old_backing = NULL, *blk_new_backing = NULL;
- BlockDriverState *bs = NULL, *bs_old_backing = NULL, *bs_new_backing = NULL;
- BlockDriver *old_backing_drv, *new_backing_drv;
+ BlockDriverState *bs = NULL;
char *filename;
const char *fmt, *cache, *src_cache, *out_basefmt, *out_baseimg;
int c, flags, src_flags, ret;
@@ -2532,22 +2528,8 @@ static int img_rebase(int argc, char **argv)
}
bs = blk_bs(blk);
- /* Find the right drivers for the backing files */
- old_backing_drv = NULL;
- new_backing_drv = NULL;
-
- if (!unsafe && bs->backing_format[0] != '\0') {
- old_backing_drv = bdrv_find_format(bs->backing_format);
- if (old_backing_drv == NULL) {
- error_report("Invalid format name: '%s'", bs->backing_format);
- ret = -1;
- goto out;
- }
- }
-
if (out_basefmt != NULL) {
- new_backing_drv = bdrv_find_format(out_basefmt);
- if (new_backing_drv == NULL) {
+ if (bdrv_find_format(out_basefmt) == NULL) {
error_report("Invalid format name: '%s'", out_basefmt);
ret = -1;
goto out;
@@ -2557,24 +2539,34 @@ static int img_rebase(int argc, char **argv)
/* For safe rebasing we need to compare old and new backing file */
if (!unsafe) {
char backing_name[PATH_MAX];
+ QDict *options = NULL;
+
+ if (bs->backing_format[0] != '\0') {
+ options = qdict_new();
+ qdict_put(options, "driver", qstring_from_str(bs->backing_format));
+ }
- blk_old_backing = blk_new_with_bs("old_backing", &error_abort);
- bs_old_backing = blk_bs(blk_old_backing);
bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
- ret = bdrv_open(&bs_old_backing, backing_name, NULL, NULL, src_flags,
- old_backing_drv, &local_err);
- if (ret) {
+ blk_old_backing = blk_new_open("old_backing", backing_name, NULL,
+ options, src_flags, &local_err);
+ if (!blk_old_backing) {
error_report("Could not open old backing file '%s': %s",
backing_name, error_get_pretty(local_err));
error_free(local_err);
goto out;
}
+
if (out_baseimg[0]) {
- blk_new_backing = blk_new_with_bs("new_backing", &error_abort);
- bs_new_backing = blk_bs(blk_new_backing);
- ret = bdrv_open(&bs_new_backing, out_baseimg, NULL, NULL, src_flags,
- new_backing_drv, &local_err);
- if (ret) {
+ if (out_basefmt) {
+ options = qdict_new();
+ qdict_put(options, "driver", qstring_from_str(out_basefmt));
+ } else {
+ options = NULL;
+ }
+
+ blk_new_backing = blk_new_open("new_backing", out_baseimg, NULL,
+ options, src_flags, &local_err);
+ if (!blk_new_backing) {
error_report("Could not open new backing file '%s': %s",
out_baseimg, error_get_pretty(local_err));
error_free(local_err);
@@ -2602,17 +2594,17 @@ static int img_rebase(int argc, char **argv)
uint8_t * buf_new;
float local_progress = 0;
- buf_old = qemu_blockalign(bs, IO_BUF_SIZE);
- buf_new = qemu_blockalign(bs, IO_BUF_SIZE);
+ buf_old = blk_blockalign(blk, IO_BUF_SIZE);
+ buf_new = blk_blockalign(blk, IO_BUF_SIZE);
- num_sectors = bdrv_nb_sectors(bs);
+ num_sectors = blk_nb_sectors(blk);
if (num_sectors < 0) {
error_report("Could not get size of '%s': %s",
filename, strerror(-num_sectors));
ret = -1;
goto out;
}
- old_backing_num_sectors = bdrv_nb_sectors(bs_old_backing);
+ old_backing_num_sectors = blk_nb_sectors(blk_old_backing);
if (old_backing_num_sectors < 0) {
char backing_name[PATH_MAX];
@@ -2622,8 +2614,8 @@ static int img_rebase(int argc, char **argv)
ret = -1;
goto out;
}
- if (bs_new_backing) {
- new_backing_num_sectors = bdrv_nb_sectors(bs_new_backing);
+ if (blk_new_backing) {
+ new_backing_num_sectors = blk_nb_sectors(blk_new_backing);
if (new_backing_num_sectors < 0) {
error_report("Could not get size of '%s': %s",
out_baseimg, strerror(-new_backing_num_sectors));
@@ -2668,21 +2660,21 @@ static int img_rebase(int argc, char **argv)
n = old_backing_num_sectors - sector;
}
- ret = bdrv_read(bs_old_backing, sector, buf_old, n);
+ ret = blk_read(blk_old_backing, sector, buf_old, n);
if (ret < 0) {
error_report("error while reading from old backing file");
goto out;
}
}
- if (sector >= new_backing_num_sectors || !bs_new_backing) {
+ if (sector >= new_backing_num_sectors || !blk_new_backing) {
memset(buf_new, 0, n * BDRV_SECTOR_SIZE);
} else {
if (sector + n > new_backing_num_sectors) {
n = new_backing_num_sectors - sector;
}
- ret = bdrv_read(bs_new_backing, sector, buf_new, n);
+ ret = blk_read(blk_new_backing, sector, buf_new, n);
if (ret < 0) {
error_report("error while reading from new backing file");
goto out;
@@ -2698,8 +2690,8 @@ static int img_rebase(int argc, char **argv)
if (compare_sectors(buf_old + written * 512,
buf_new + written * 512, n - written, &pnum))
{
- ret = bdrv_write(bs, sector + written,
- buf_old + written * 512, pnum);
+ ret = blk_write(blk, sector + written,
+ buf_old + written * 512, pnum);
if (ret < 0) {
error_report("Error while writing to COW image: %s",
strerror(-ret));
@@ -2764,7 +2756,6 @@ static int img_resize(int argc, char **argv)
int64_t n, total_size;
bool quiet = false;
BlockBackend *blk = NULL;
- BlockDriverState *bs = NULL;
QemuOpts *param;
static QemuOptsList resize_options = {
.name = "resize_options",
@@ -2846,10 +2837,9 @@ static int img_resize(int argc, char **argv)
ret = -1;
goto out;
}
- bs = blk_bs(blk);
if (relative) {
- total_size = bdrv_getlength(bs) + n * relative;
+ total_size = blk_getlength(blk) + n * relative;
} else {
total_size = n;
}
@@ -2859,7 +2849,7 @@ static int img_resize(int argc, char **argv)
goto out;
}
- ret = bdrv_truncate(bs, total_size);
+ ret = blk_truncate(blk, total_size);
switch (ret) {
case 0:
qprintf(quiet, "Image resized.\n");
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
index e70855254a..1afcfc01a5 100644
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -9,10 +9,13 @@
*/
#include "qemu-io.h"
-#include "block/block_int.h"
+#include "sysemu/block-backend.h"
+#include "block/block.h"
+#include "block/block_int.h" /* for info_f() */
#include "block/qapi.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
+#include "sysemu/block-backend.h"
#define CMD_NOFILE_OK 0x01
@@ -40,24 +43,24 @@ int qemuio_command_usage(const cmdinfo_t *ci)
return 0;
}
-static int init_check_command(BlockDriverState *bs, const cmdinfo_t *ct)
+static int init_check_command(BlockBackend *blk, const cmdinfo_t *ct)
{
if (ct->flags & CMD_FLAG_GLOBAL) {
return 1;
}
- if (!(ct->flags & CMD_NOFILE_OK) && !bs) {
+ if (!(ct->flags & CMD_NOFILE_OK) && !blk) {
fprintf(stderr, "no file open, try 'help open'\n");
return 0;
}
return 1;
}
-static int command(BlockDriverState *bs, const cmdinfo_t *ct, int argc,
+static int command(BlockBackend *blk, const cmdinfo_t *ct, int argc,
char **argv)
{
char *cmd = argv[0];
- if (!init_check_command(bs, ct)) {
+ if (!init_check_command(blk, ct)) {
return 0;
}
@@ -78,7 +81,7 @@ static int command(BlockDriverState *bs, const cmdinfo_t *ct, int argc,
return 0;
}
optind = 0;
- return ct->cfunc(bs, argc, argv);
+ return ct->cfunc(blk, argc, argv);
}
static const cmdinfo_t *find_command(const char *cmd)
@@ -267,14 +270,14 @@ static int parse_pattern(const char *arg)
*/
#define MISALIGN_OFFSET 16
-static void *qemu_io_alloc(BlockDriverState *bs, size_t len, int pattern)
+static void *qemu_io_alloc(BlockBackend *blk, size_t len, int pattern)
{
void *buf;
if (qemuio_misalign) {
len += MISALIGN_OFFSET;
}
- buf = qemu_blockalign(bs, len);
+ buf = blk_blockalign(blk, len);
memset(buf, pattern, len);
if (qemuio_misalign) {
buf += MISALIGN_OFFSET;
@@ -340,7 +343,7 @@ static void print_report(const char *op, struct timeval *t, int64_t offset,
* vector matching it.
*/
static void *
-create_iovec(BlockDriverState *bs, QEMUIOVector *qiov, char **argv, int nr_iov,
+create_iovec(BlockBackend *blk, QEMUIOVector *qiov, char **argv, int nr_iov,
int pattern)
{
size_t *sizes = g_new0(size_t, nr_iov);
@@ -377,7 +380,7 @@ create_iovec(BlockDriverState *bs, QEMUIOVector *qiov, char **argv, int nr_iov,
qemu_iovec_init(qiov, nr_iov);
- buf = p = qemu_io_alloc(bs, count, pattern);
+ buf = p = qemu_io_alloc(blk, count, pattern);
for (i = 0; i < nr_iov; i++) {
qemu_iovec_add(qiov, p, sizes[i]);
@@ -389,12 +392,12 @@ fail:
return buf;
}
-static int do_read(BlockDriverState *bs, char *buf, int64_t offset, int count,
+static int do_read(BlockBackend *blk, char *buf, int64_t offset, int count,
int *total)
{
int ret;
- ret = bdrv_read(bs, offset >> 9, (uint8_t *)buf, count >> 9);
+ ret = blk_read(blk, offset >> 9, (uint8_t *)buf, count >> 9);
if (ret < 0) {
return ret;
}
@@ -402,12 +405,12 @@ static int do_read(BlockDriverState *bs, char *buf, int64_t offset, int count,
return 1;
}
-static int do_write(BlockDriverState *bs, char *buf, int64_t offset, int count,
+static int do_write(BlockBackend *blk, char *buf, int64_t offset, int count,
int *total)
{
int ret;
- ret = bdrv_write(bs, offset >> 9, (uint8_t *)buf, count >> 9);
+ ret = blk_write(blk, offset >> 9, (uint8_t *)buf, count >> 9);
if (ret < 0) {
return ret;
}
@@ -415,20 +418,20 @@ static int do_write(BlockDriverState *bs, char *buf, int64_t offset, int count,
return 1;
}
-static int do_pread(BlockDriverState *bs, char *buf, int64_t offset, int count,
+static int do_pread(BlockBackend *blk, char *buf, int64_t offset, int count,
int *total)
{
- *total = bdrv_pread(bs, offset, (uint8_t *)buf, count);
+ *total = blk_pread(blk, offset, (uint8_t *)buf, count);
if (*total < 0) {
return *total;
}
return 1;
}
-static int do_pwrite(BlockDriverState *bs, char *buf, int64_t offset, int count,
+static int do_pwrite(BlockBackend *blk, char *buf, int64_t offset, int count,
int *total)
{
- *total = bdrv_pwrite(bs, offset, (uint8_t *)buf, count);
+ *total = blk_pwrite(blk, offset, (uint8_t *)buf, count);
if (*total < 0) {
return *total;
}
@@ -436,7 +439,7 @@ static int do_pwrite(BlockDriverState *bs, char *buf, int64_t offset, int count,
}
typedef struct {
- BlockDriverState *bs;
+ BlockBackend *blk;
int64_t offset;
int count;
int *total;
@@ -448,8 +451,8 @@ static void coroutine_fn co_write_zeroes_entry(void *opaque)
{
CoWriteZeroes *data = opaque;
- data->ret = bdrv_co_write_zeroes(data->bs, data->offset / BDRV_SECTOR_SIZE,
- data->count / BDRV_SECTOR_SIZE, 0);
+ data->ret = blk_co_write_zeroes(data->blk, data->offset / BDRV_SECTOR_SIZE,
+ data->count / BDRV_SECTOR_SIZE, 0);
data->done = true;
if (data->ret < 0) {
*data->total = data->ret;
@@ -459,12 +462,12 @@ static void coroutine_fn co_write_zeroes_entry(void *opaque)
*data->total = data->count;
}
-static int do_co_write_zeroes(BlockDriverState *bs, int64_t offset, int count,
+static int do_co_write_zeroes(BlockBackend *blk, int64_t offset, int count,
int *total)
{
Coroutine *co;
CoWriteZeroes data = {
- .bs = bs,
+ .blk = blk,
.offset = offset,
.count = count,
.total = total,
@@ -474,7 +477,7 @@ static int do_co_write_zeroes(BlockDriverState *bs, int64_t offset, int count,
co = qemu_coroutine_create(co_write_zeroes_entry);
qemu_coroutine_enter(co, &data);
while (!data.done) {
- aio_poll(bdrv_get_aio_context(bs), true);
+ aio_poll(blk_get_aio_context(blk), true);
}
if (data.ret < 0) {
return data.ret;
@@ -483,12 +486,12 @@ static int do_co_write_zeroes(BlockDriverState *bs, int64_t offset, int count,
}
}
-static int do_write_compressed(BlockDriverState *bs, char *buf, int64_t offset,
+static int do_write_compressed(BlockBackend *blk, char *buf, int64_t offset,
int count, int *total)
{
int ret;
- ret = bdrv_write_compressed(bs, offset >> 9, (uint8_t *)buf, count >> 9);
+ ret = blk_write_compressed(blk, offset >> 9, (uint8_t *)buf, count >> 9);
if (ret < 0) {
return ret;
}
@@ -496,20 +499,20 @@ static int do_write_compressed(BlockDriverState *bs, char *buf, int64_t offset,
return 1;
}
-static int do_load_vmstate(BlockDriverState *bs, char *buf, int64_t offset,
+static int do_load_vmstate(BlockBackend *blk, char *buf, int64_t offset,
int count, int *total)
{
- *total = bdrv_load_vmstate(bs, (uint8_t *)buf, offset, count);
+ *total = blk_load_vmstate(blk, (uint8_t *)buf, offset, count);
if (*total < 0) {
return *total;
}
return 1;
}
-static int do_save_vmstate(BlockDriverState *bs, char *buf, int64_t offset,
+static int do_save_vmstate(BlockBackend *blk, char *buf, int64_t offset,
int count, int *total)
{
- *total = bdrv_save_vmstate(bs, (uint8_t *)buf, offset, count);
+ *total = blk_save_vmstate(blk, (uint8_t *)buf, offset, count);
if (*total < 0) {
return *total;
}
@@ -522,13 +525,13 @@ static void aio_rw_done(void *opaque, int ret)
*(int *)opaque = ret;
}
-static int do_aio_readv(BlockDriverState *bs, QEMUIOVector *qiov,
+static int do_aio_readv(BlockBackend *blk, QEMUIOVector *qiov,
int64_t offset, int *total)
{
int async_ret = NOT_DONE;
- bdrv_aio_readv(bs, offset >> 9, qiov, qiov->size >> 9,
- aio_rw_done, &async_ret);
+ blk_aio_readv(blk, offset >> 9, qiov, qiov->size >> 9,
+ aio_rw_done, &async_ret);
while (async_ret == NOT_DONE) {
main_loop_wait(false);
}
@@ -537,13 +540,13 @@ static int do_aio_readv(BlockDriverState *bs, QEMUIOVector *qiov,
return async_ret < 0 ? async_ret : 1;
}
-static int do_aio_writev(BlockDriverState *bs, QEMUIOVector *qiov,
+static int do_aio_writev(BlockBackend *blk, QEMUIOVector *qiov,
int64_t offset, int *total)
{
int async_ret = NOT_DONE;
- bdrv_aio_writev(bs, offset >> 9, qiov, qiov->size >> 9,
- aio_rw_done, &async_ret);
+ blk_aio_writev(blk, offset >> 9, qiov, qiov->size >> 9,
+ aio_rw_done, &async_ret);
while (async_ret == NOT_DONE) {
main_loop_wait(false);
}
@@ -567,7 +570,7 @@ static void multiwrite_cb(void *opaque, int ret)
}
}
-static int do_aio_multiwrite(BlockDriverState *bs, BlockRequest* reqs,
+static int do_aio_multiwrite(BlockBackend *blk, BlockRequest* reqs,
int num_reqs, int *total)
{
int i, ret;
@@ -583,7 +586,7 @@ static int do_aio_multiwrite(BlockDriverState *bs, BlockRequest* reqs,
*total += reqs[i].qiov->size;
}
- ret = bdrv_aio_multiwrite(bs, reqs, num_reqs);
+ ret = blk_aio_multiwrite(blk, reqs, num_reqs);
if (ret < 0) {
return ret;
}
@@ -609,7 +612,7 @@ static void read_help(void)
" -b, -- read from the VM state rather than the virtual disk\n"
" -C, -- report statistics in a machine parsable format\n"
" -l, -- length for pattern verification (only with -P)\n"
-" -p, -- use bdrv_pread to read the file\n"
+" -p, -- use blk_pread to read the file\n"
" -P, -- use a pattern to verify read data\n"
" -q, -- quiet mode, do not show I/O statistics\n"
" -s, -- start offset for pattern verification (only with -P)\n"
@@ -617,7 +620,7 @@ static void read_help(void)
"\n");
}
-static int read_f(BlockDriverState *bs, int argc, char **argv);
+static int read_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t read_cmd = {
.name = "read",
@@ -630,7 +633,7 @@ static const cmdinfo_t read_cmd = {
.help = read_help,
};
-static int read_f(BlockDriverState *bs, int argc, char **argv)
+static int read_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
int Cflag = 0, pflag = 0, qflag = 0, vflag = 0;
@@ -736,15 +739,15 @@ static int read_f(BlockDriverState *bs, int argc, char **argv)
}
}
- buf = qemu_io_alloc(bs, count, 0xab);
+ buf = qemu_io_alloc(blk, count, 0xab);
gettimeofday(&t1, NULL);
if (pflag) {
- cnt = do_pread(bs, buf, offset, count, &total);
+ cnt = do_pread(blk, buf, offset, count, &total);
} else if (bflag) {
- cnt = do_load_vmstate(bs, buf, offset, count, &total);
+ cnt = do_load_vmstate(blk, buf, offset, count, &total);
} else {
- cnt = do_read(bs, buf, offset, count, &total);
+ cnt = do_read(blk, buf, offset, count, &total);
}
gettimeofday(&t2, NULL);
@@ -801,7 +804,7 @@ static void readv_help(void)
"\n");
}
-static int readv_f(BlockDriverState *bs, int argc, char **argv);
+static int readv_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t readv_cmd = {
.name = "readv",
@@ -813,7 +816,7 @@ static const cmdinfo_t readv_cmd = {
.help = readv_help,
};
-static int readv_f(BlockDriverState *bs, int argc, char **argv)
+static int readv_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
int Cflag = 0, qflag = 0, vflag = 0;
@@ -869,13 +872,13 @@ static int readv_f(BlockDriverState *bs, int argc, char **argv)
}
nr_iov = argc - optind;
- buf = create_iovec(bs, &qiov, &argv[optind], nr_iov, 0xab);
+ buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, 0xab);
if (buf == NULL) {
return 0;
}
gettimeofday(&t1, NULL);
- cnt = do_aio_readv(bs, &qiov, offset, &total);
+ cnt = do_aio_readv(blk, &qiov, offset, &total);
gettimeofday(&t2, NULL);
if (cnt < 0) {
@@ -923,16 +926,16 @@ static void write_help(void)
" Writes into a segment of the currently open file, using a buffer\n"
" filled with a set pattern (0xcdcdcdcd).\n"
" -b, -- write to the VM state rather than the virtual disk\n"
-" -c, -- write compressed data with bdrv_write_compressed\n"
-" -p, -- use bdrv_pwrite to write the file\n"
+" -c, -- write compressed data with blk_write_compressed\n"
+" -p, -- use blk_pwrite to write the file\n"
" -P, -- use different pattern to fill file\n"
" -C, -- report statistics in a machine parsable format\n"
" -q, -- quiet mode, do not show I/O statistics\n"
-" -z, -- write zeroes using bdrv_co_write_zeroes\n"
+" -z, -- write zeroes using blk_co_write_zeroes\n"
"\n");
}
-static int write_f(BlockDriverState *bs, int argc, char **argv);
+static int write_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t write_cmd = {
.name = "write",
@@ -945,7 +948,7 @@ static const cmdinfo_t write_cmd = {
.help = write_help,
};
-static int write_f(BlockDriverState *bs, int argc, char **argv)
+static int write_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
int Cflag = 0, pflag = 0, qflag = 0, bflag = 0, Pflag = 0, zflag = 0;
@@ -1032,20 +1035,20 @@ static int write_f(BlockDriverState *bs, int argc, char **argv)
}
if (!zflag) {
- buf = qemu_io_alloc(bs, count, pattern);
+ buf = qemu_io_alloc(blk, count, pattern);
}
gettimeofday(&t1, NULL);
if (pflag) {
- cnt = do_pwrite(bs, buf, offset, count, &total);
+ cnt = do_pwrite(blk, buf, offset, count, &total);
} else if (bflag) {
- cnt = do_save_vmstate(bs, buf, offset, count, &total);
+ cnt = do_save_vmstate(blk, buf, offset, count, &total);
} else if (zflag) {
- cnt = do_co_write_zeroes(bs, offset, count, &total);
+ cnt = do_co_write_zeroes(blk, offset, count, &total);
} else if (cflag) {
- cnt = do_write_compressed(bs, buf, offset, count, &total);
+ cnt = do_write_compressed(blk, buf, offset, count, &total);
} else {
- cnt = do_write(bs, buf, offset, count, &total);
+ cnt = do_write(blk, buf, offset, count, &total);
}
gettimeofday(&t2, NULL);
@@ -1088,7 +1091,7 @@ writev_help(void)
"\n");
}
-static int writev_f(BlockDriverState *bs, int argc, char **argv);
+static int writev_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t writev_cmd = {
.name = "writev",
@@ -1100,7 +1103,7 @@ static const cmdinfo_t writev_cmd = {
.help = writev_help,
};
-static int writev_f(BlockDriverState *bs, int argc, char **argv)
+static int writev_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
int Cflag = 0, qflag = 0;
@@ -1150,13 +1153,13 @@ static int writev_f(BlockDriverState *bs, int argc, char **argv)
}
nr_iov = argc - optind;
- buf = create_iovec(bs, &qiov, &argv[optind], nr_iov, pattern);
+ buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, pattern);
if (buf == NULL) {
return 0;
}
gettimeofday(&t1, NULL);
- cnt = do_aio_writev(bs, &qiov, offset, &total);
+ cnt = do_aio_writev(blk, &qiov, offset, &total);
gettimeofday(&t2, NULL);
if (cnt < 0) {
@@ -1197,7 +1200,7 @@ static void multiwrite_help(void)
"\n");
}
-static int multiwrite_f(BlockDriverState *bs, int argc, char **argv);
+static int multiwrite_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t multiwrite_cmd = {
.name = "multiwrite",
@@ -1209,7 +1212,7 @@ static const cmdinfo_t multiwrite_cmd = {
.help = multiwrite_help,
};
-static int multiwrite_f(BlockDriverState *bs, int argc, char **argv)
+static int multiwrite_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
int Cflag = 0, qflag = 0;
@@ -1290,7 +1293,7 @@ static int multiwrite_f(BlockDriverState *bs, int argc, char **argv)
nr_iov = j - optind;
/* Build request */
- buf[i] = create_iovec(bs, &qiovs[i], &argv[optind], nr_iov, pattern);
+ buf[i] = create_iovec(blk, &qiovs[i], &argv[optind], nr_iov, pattern);
if (buf[i] == NULL) {
goto out;
}
@@ -1308,7 +1311,7 @@ static int multiwrite_f(BlockDriverState *bs, int argc, char **argv)
nr_reqs = i;
gettimeofday(&t1, NULL);
- cnt = do_aio_multiwrite(bs, reqs, nr_reqs, &total);
+ cnt = do_aio_multiwrite(blk, reqs, nr_reqs, &total);
gettimeofday(&t2, NULL);
if (cnt < 0) {
@@ -1337,6 +1340,7 @@ out:
}
struct aio_ctx {
+ BlockBackend *blk;
QEMUIOVector qiov;
int64_t offset;
char *buf;
@@ -1344,6 +1348,7 @@ struct aio_ctx {
int vflag;
int Cflag;
int Pflag;
+ BlockAcctCookie acct;
int pattern;
struct timeval t1;
};
@@ -1361,6 +1366,8 @@ static void aio_write_done(void *opaque, int ret)
goto out;
}
+ block_acct_done(blk_get_stats(ctx->blk), &ctx->acct);
+
if (ctx->qflag) {
goto out;
}
@@ -1398,6 +1405,8 @@ static void aio_read_done(void *opaque, int ret)
g_free(cmp_buf);
}
+ block_acct_done(blk_get_stats(ctx->blk), &ctx->acct);
+
if (ctx->qflag) {
goto out;
}
@@ -1436,7 +1445,7 @@ static void aio_read_help(void)
"\n");
}
-static int aio_read_f(BlockDriverState *bs, int argc, char **argv);
+static int aio_read_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t aio_read_cmd = {
.name = "aio_read",
@@ -1448,11 +1457,12 @@ static const cmdinfo_t aio_read_cmd = {
.help = aio_read_help,
};
-static int aio_read_f(BlockDriverState *bs, int argc, char **argv)
+static int aio_read_f(BlockBackend *blk, int argc, char **argv)
{
int nr_iov, c;
struct aio_ctx *ctx = g_new0(struct aio_ctx, 1);
+ ctx->blk = blk;
while ((c = getopt(argc, argv, "CP:qv")) != EOF) {
switch (c) {
case 'C':
@@ -1499,15 +1509,17 @@ static int aio_read_f(BlockDriverState *bs, int argc, char **argv)
}
nr_iov = argc - optind;
- ctx->buf = create_iovec(bs, &ctx->qiov, &argv[optind], nr_iov, 0xab);
+ ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov, 0xab);
if (ctx->buf == NULL) {
g_free(ctx);
return 0;
}
gettimeofday(&ctx->t1, NULL);
- bdrv_aio_readv(bs, ctx->offset >> 9, &ctx->qiov,
- ctx->qiov.size >> 9, aio_read_done, ctx);
+ block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size,
+ BLOCK_ACCT_READ);
+ blk_aio_readv(blk, ctx->offset >> 9, &ctx->qiov,
+ ctx->qiov.size >> 9, aio_read_done, ctx);
return 0;
}
@@ -1531,7 +1543,7 @@ static void aio_write_help(void)
"\n");
}
-static int aio_write_f(BlockDriverState *bs, int argc, char **argv);
+static int aio_write_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t aio_write_cmd = {
.name = "aio_write",
@@ -1543,12 +1555,13 @@ static const cmdinfo_t aio_write_cmd = {
.help = aio_write_help,
};
-static int aio_write_f(BlockDriverState *bs, int argc, char **argv)
+static int aio_write_f(BlockBackend *blk, int argc, char **argv)
{
int nr_iov, c;
int pattern = 0xcd;
struct aio_ctx *ctx = g_new0(struct aio_ctx, 1);
+ ctx->blk = blk;
while ((c = getopt(argc, argv, "CqP:")) != EOF) {
switch (c) {
case 'C':
@@ -1591,21 +1604,23 @@ static int aio_write_f(BlockDriverState *bs, int argc, char **argv)
}
nr_iov = argc - optind;
- ctx->buf = create_iovec(bs, &ctx->qiov, &argv[optind], nr_iov, pattern);
+ ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov, pattern);
if (ctx->buf == NULL) {
g_free(ctx);
return 0;
}
gettimeofday(&ctx->t1, NULL);
- bdrv_aio_writev(bs, ctx->offset >> 9, &ctx->qiov,
- ctx->qiov.size >> 9, aio_write_done, ctx);
+ block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size,
+ BLOCK_ACCT_WRITE);
+ blk_aio_writev(blk, ctx->offset >> 9, &ctx->qiov,
+ ctx->qiov.size >> 9, aio_write_done, ctx);
return 0;
}
-static int aio_flush_f(BlockDriverState *bs, int argc, char **argv)
+static int aio_flush_f(BlockBackend *blk, int argc, char **argv)
{
- bdrv_drain_all();
+ blk_drain_all();
return 0;
}
@@ -1615,9 +1630,9 @@ static const cmdinfo_t aio_flush_cmd = {
.oneline = "completes all outstanding aio requests"
};
-static int flush_f(BlockDriverState *bs, int argc, char **argv)
+static int flush_f(BlockBackend *blk, int argc, char **argv)
{
- bdrv_flush(bs);
+ blk_flush(blk);
return 0;
}
@@ -1628,7 +1643,7 @@ static const cmdinfo_t flush_cmd = {
.oneline = "flush all in-core file state to disk",
};
-static int truncate_f(BlockDriverState *bs, int argc, char **argv)
+static int truncate_f(BlockBackend *blk, int argc, char **argv)
{
int64_t offset;
int ret;
@@ -1639,7 +1654,7 @@ static int truncate_f(BlockDriverState *bs, int argc, char **argv)
return 0;
}
- ret = bdrv_truncate(bs, offset);
+ ret = blk_truncate(blk, offset);
if (ret < 0) {
printf("truncate: %s\n", strerror(-ret));
return 0;
@@ -1658,12 +1673,12 @@ static const cmdinfo_t truncate_cmd = {
.oneline = "truncates the current file at the given offset",
};
-static int length_f(BlockDriverState *bs, int argc, char **argv)
+static int length_f(BlockBackend *blk, int argc, char **argv)
{
int64_t size;
char s1[64];
- size = bdrv_getlength(bs);
+ size = blk_getlength(blk);
if (size < 0) {
printf("getlength: %s\n", strerror(-size));
return 0;
@@ -1683,8 +1698,9 @@ static const cmdinfo_t length_cmd = {
};
-static int info_f(BlockDriverState *bs, int argc, char **argv)
+static int info_f(BlockBackend *blk, int argc, char **argv)
{
+ BlockDriverState *bs = blk_bs(blk);
BlockDriverInfo bdi;
ImageInfoSpecific *spec_info;
char s1[64], s2[64];
@@ -1742,7 +1758,7 @@ static void discard_help(void)
"\n");
}
-static int discard_f(BlockDriverState *bs, int argc, char **argv);
+static int discard_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t discard_cmd = {
.name = "discard",
@@ -1755,7 +1771,7 @@ static const cmdinfo_t discard_cmd = {
.help = discard_help,
};
-static int discard_f(BlockDriverState *bs, int argc, char **argv)
+static int discard_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
int Cflag = 0, qflag = 0;
@@ -1794,8 +1810,8 @@ static int discard_f(BlockDriverState *bs, int argc, char **argv)
}
gettimeofday(&t1, NULL);
- ret = bdrv_discard(bs, offset >> BDRV_SECTOR_BITS,
- count >> BDRV_SECTOR_BITS);
+ ret = blk_discard(blk, offset >> BDRV_SECTOR_BITS,
+ count >> BDRV_SECTOR_BITS);
gettimeofday(&t2, NULL);
if (ret < 0) {
@@ -1813,8 +1829,9 @@ out:
return 0;
}
-static int alloc_f(BlockDriverState *bs, int argc, char **argv)
+static int alloc_f(BlockBackend *blk, int argc, char **argv)
{
+ BlockDriverState *bs = blk_bs(blk);
int64_t offset, sector_num;
int nb_sectors, remaining;
char s1[64];
@@ -1910,20 +1927,27 @@ static int map_is_allocated(BlockDriverState *bs, int64_t sector_num,
return firstret;
}
-static int map_f(BlockDriverState *bs, int argc, char **argv)
+static int map_f(BlockBackend *blk, int argc, char **argv)
{
int64_t offset;
- int64_t nb_sectors;
+ int64_t nb_sectors, total_sectors;
char s1[64];
int64_t num;
int ret;
const char *retstr;
offset = 0;
- nb_sectors = bs->total_sectors;
+ total_sectors = blk_nb_sectors(blk);
+ if (total_sectors < 0) {
+ error_report("Failed to query image length: %s",
+ strerror(-total_sectors));
+ return 0;
+ }
+
+ nb_sectors = total_sectors;
do {
- ret = map_is_allocated(bs, offset, nb_sectors, &num);
+ ret = map_is_allocated(blk_bs(blk), offset, nb_sectors, &num);
if (ret < 0) {
error_report("Failed to get allocation status: %s", strerror(-ret));
return 0;
@@ -1940,7 +1964,7 @@ static int map_f(BlockDriverState *bs, int argc, char **argv)
offset += num;
nb_sectors -= num;
- } while (offset < bs->total_sectors);
+ } while (offset < total_sectors);
return 0;
}
@@ -1954,11 +1978,11 @@ static const cmdinfo_t map_cmd = {
.oneline = "prints the allocated areas of a file",
};
-static int break_f(BlockDriverState *bs, int argc, char **argv)
+static int break_f(BlockBackend *blk, int argc, char **argv)
{
int ret;
- ret = bdrv_debug_breakpoint(bs, argv[1], argv[2]);
+ ret = bdrv_debug_breakpoint(blk_bs(blk), argv[1], argv[2]);
if (ret < 0) {
printf("Could not set breakpoint: %s\n", strerror(-ret));
}
@@ -1966,11 +1990,11 @@ static int break_f(BlockDriverState *bs, int argc, char **argv)
return 0;
}
-static int remove_break_f(BlockDriverState *bs, int argc, char **argv)
+static int remove_break_f(BlockBackend *blk, int argc, char **argv)
{
int ret;
- ret = bdrv_debug_remove_breakpoint(bs, argv[1]);
+ ret = bdrv_debug_remove_breakpoint(blk_bs(blk), argv[1]);
if (ret < 0) {
printf("Could not remove breakpoint %s: %s\n", argv[1], strerror(-ret));
}
@@ -1997,11 +2021,11 @@ static const cmdinfo_t remove_break_cmd = {
.oneline = "remove a breakpoint by tag",
};
-static int resume_f(BlockDriverState *bs, int argc, char **argv)
+static int resume_f(BlockBackend *blk, int argc, char **argv)
{
int ret;
- ret = bdrv_debug_resume(bs, argv[1]);
+ ret = bdrv_debug_resume(blk_bs(blk), argv[1]);
if (ret < 0) {
printf("Could not resume request: %s\n", strerror(-ret));
}
@@ -2018,10 +2042,10 @@ static const cmdinfo_t resume_cmd = {
.oneline = "resumes the request tagged as tag",
};
-static int wait_break_f(BlockDriverState *bs, int argc, char **argv)
+static int wait_break_f(BlockBackend *blk, int argc, char **argv)
{
- while (!bdrv_debug_is_suspended(bs, argv[1])) {
- aio_poll(bdrv_get_aio_context(bs), true);
+ while (!bdrv_debug_is_suspended(blk_bs(blk), argv[1])) {
+ aio_poll(blk_get_aio_context(blk), true);
}
return 0;
@@ -2036,7 +2060,7 @@ static const cmdinfo_t wait_break_cmd = {
.oneline = "waits for the suspension of a request",
};
-static int abort_f(BlockDriverState *bs, int argc, char **argv)
+static int abort_f(BlockBackend *blk, int argc, char **argv)
{
abort();
}
@@ -2062,7 +2086,7 @@ static void sigraise_help(void)
"\n", SIGTERM);
}
-static int sigraise_f(BlockDriverState *bs, int argc, char **argv);
+static int sigraise_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t sigraise_cmd = {
.name = "sigraise",
@@ -2075,7 +2099,7 @@ static const cmdinfo_t sigraise_cmd = {
.help = sigraise_help,
};
-static int sigraise_f(BlockDriverState *bs, int argc, char **argv)
+static int sigraise_f(BlockBackend *blk, int argc, char **argv)
{
int sig = cvtnum(argv[1]);
if (sig < 0) {
@@ -2099,7 +2123,7 @@ static void sleep_cb(void *opaque)
*expired = true;
}
-static int sleep_f(BlockDriverState *bs, int argc, char **argv)
+static int sleep_f(BlockBackend *blk, int argc, char **argv)
{
char *endptr;
long ms;
@@ -2168,7 +2192,7 @@ static void help_all(void)
printf("\nUse 'help commandname' for extended help.\n");
}
-static int help_f(BlockDriverState *bs, int argc, char **argv)
+static int help_f(BlockBackend *blk, int argc, char **argv)
{
const cmdinfo_t *ct;
@@ -2198,7 +2222,7 @@ static const cmdinfo_t help_cmd = {
.oneline = "help for one or all commands",
};
-bool qemuio_command(BlockDriverState *bs, const char *cmd)
+bool qemuio_command(BlockBackend *blk, const char *cmd)
{
char *input;
const cmdinfo_t *ct;
@@ -2211,7 +2235,7 @@ bool qemuio_command(BlockDriverState *bs, const char *cmd)
if (c) {
ct = find_command(v[0]);
if (ct) {
- done = command(bs, ct, c, v);
+ done = command(blk, ct, c, v);
} else {
fprintf(stderr, "command \"%s\" not found\n", v[0]);
}
diff --git a/qemu-io.c b/qemu-io.c
index 91a445a106..4a3e71991a 100644
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -28,7 +28,6 @@
static char *progname;
static BlockBackend *qemuio_blk;
-static BlockDriverState *qemuio_bs;
/* qemu-io commands passed using -c */
static int ncmdline;
@@ -36,10 +35,9 @@ static char **cmdline;
static ReadLineState *readline_state;
-static int close_f(BlockDriverState *bs, int argc, char **argv)
+static int close_f(BlockBackend *blk, int argc, char **argv)
{
blk_unref(qemuio_blk);
- qemuio_bs = NULL;
qemuio_blk = NULL;
return 0;
}
@@ -51,32 +49,22 @@ static const cmdinfo_t close_cmd = {
.oneline = "close the current open file",
};
-static int openfile(char *name, BlockDriver *drv, int flags, int growable,
- QDict *opts)
+static int openfile(char *name, int flags, QDict *opts)
{
Error *local_err = NULL;
- if (qemuio_bs) {
+ if (qemuio_blk) {
fprintf(stderr, "file open already, try 'help close'\n");
QDECREF(opts);
return 1;
}
- qemuio_blk = blk_new_with_bs("hda", &error_abort);
- qemuio_bs = blk_bs(qemuio_blk);
-
- if (growable) {
- flags |= BDRV_O_PROTOCOL;
- }
-
- if (bdrv_open(&qemuio_bs, name, NULL, opts, flags, drv, &local_err) < 0) {
+ qemuio_blk = blk_new_open("hda", name, NULL, opts, flags, &local_err);
+ if (!qemuio_blk) {
fprintf(stderr, "%s: can't open%s%s: %s\n", progname,
name ? " device " : "", name ?: "",
error_get_pretty(local_err));
error_free(local_err);
- blk_unref(qemuio_blk);
- qemuio_bs = NULL;
- qemuio_blk = NULL;
return 1;
}
@@ -96,12 +84,11 @@ static void open_help(void)
" -r, -- open file read-only\n"
" -s, -- use snapshot file\n"
" -n, -- disable host cache\n"
-" -g, -- allow file to grow (only applies to protocols)\n"
" -o, -- options to be given to the block driver"
"\n");
}
-static int open_f(BlockDriverState *bs, int argc, char **argv);
+static int open_f(BlockBackend *blk, int argc, char **argv);
static const cmdinfo_t open_cmd = {
.name = "open",
@@ -125,11 +112,10 @@ static QemuOptsList empty_opts = {
},
};
-static int open_f(BlockDriverState *bs, int argc, char **argv)
+static int open_f(BlockBackend *blk, int argc, char **argv)
{
int flags = 0;
int readonly = 0;
- int growable = 0;
int c;
QemuOpts *qopts;
QDict *opts;
@@ -145,9 +131,6 @@ static int open_f(BlockDriverState *bs, int argc, char **argv)
case 'r':
readonly = 1;
break;
- case 'g':
- growable = 1;
- break;
case 'o':
if (!qemu_opts_parse(&empty_opts, optarg, 0)) {
printf("could not parse option list -- %s\n", optarg);
@@ -170,16 +153,16 @@ static int open_f(BlockDriverState *bs, int argc, char **argv)
qemu_opts_reset(&empty_opts);
if (optind == argc - 1) {
- return openfile(argv[optind], NULL, flags, growable, opts);
+ return openfile(argv[optind], flags, opts);
} else if (optind == argc) {
- return openfile(NULL, NULL, flags, growable, opts);
+ return openfile(NULL, flags, opts);
} else {
QDECREF(opts);
return qemuio_command_usage(&open_cmd);
}
}
-static int quit_f(BlockDriverState *bs, int argc, char **argv)
+static int quit_f(BlockBackend *blk, int argc, char **argv)
{
return 1;
}
@@ -206,7 +189,6 @@ static void usage(const char *name)
" -r, --read-only export read-only\n"
" -s, --snapshot use snapshot file\n"
" -n, --nocache disable host cache\n"
-" -g, --growable allow file to grow (only applies to protocols)\n"
" -m, --misalign misalign allocations for O_DIRECT\n"
" -k, --native-aio use kernel AIO implementation (on Linux only)\n"
" -t, --cache=MODE use the given cache mode for the image\n"
@@ -317,7 +299,7 @@ static void command_loop(void)
char *input;
for (i = 0; !done && i < ncmdline; i++) {
- done = qemuio_command(qemuio_bs, cmdline[i]);
+ done = qemuio_command(qemuio_blk, cmdline[i]);
}
if (cmdline) {
g_free(cmdline);
@@ -342,7 +324,7 @@ static void command_loop(void)
if (input == NULL) {
break;
}
- done = qemuio_command(qemuio_bs, input);
+ done = qemuio_command(qemuio_blk, input);
g_free(input);
prompted = 0;
@@ -365,7 +347,6 @@ static void reenable_tty_echo(void)
int main(int argc, char **argv)
{
int readonly = 0;
- int growable = 0;
const char *sopt = "hVc:d:f:rsnmgkt:T:";
const struct option lopt[] = {
{ "help", 0, NULL, 'h' },
@@ -377,7 +358,6 @@ int main(int argc, char **argv)
{ "snapshot", 0, NULL, 's' },
{ "nocache", 0, NULL, 'n' },
{ "misalign", 0, NULL, 'm' },
- { "growable", 0, NULL, 'g' },
{ "native-aio", 0, NULL, 'k' },
{ "discard", 1, NULL, 'd' },
{ "cache", 1, NULL, 't' },
@@ -387,8 +367,8 @@ int main(int argc, char **argv)
int c;
int opt_index = 0;
int flags = BDRV_O_UNMAP;
- BlockDriver *drv = NULL;
Error *local_error = NULL;
+ QDict *opts = NULL;
#ifdef CONFIG_POSIX
signal(SIGPIPE, SIG_IGN);
@@ -414,11 +394,10 @@ int main(int argc, char **argv)
}
break;
case 'f':
- drv = bdrv_find_format(optarg);
- if (!drv) {
- error_report("Invalid format '%s'", optarg);
- exit(EXIT_FAILURE);
+ if (!opts) {
+ opts = qdict_new();
}
+ qdict_put(opts, "driver", qstring_from_str(optarg));
break;
case 'c':
add_user_command(optarg);
@@ -429,9 +408,6 @@ int main(int argc, char **argv)
case 'm':
qemuio_misalign = true;
break;
- case 'g':
- growable = 1;
- break;
case 'k':
flags |= BDRV_O_NATIVE_AIO;
break;
@@ -489,7 +465,7 @@ int main(int argc, char **argv)
}
if ((argc - optind) == 1) {
- openfile(argv[optind], drv, flags, growable, NULL);
+ openfile(argv[optind], flags, opts);
}
command_loop();
diff --git a/qemu-log.c b/qemu-log.c
index 05b5493e0c..13f3813f68 100644
--- a/qemu-log.c
+++ b/qemu-log.c
@@ -111,7 +111,7 @@ const QEMULogItem qemu_log_items[] = {
{ CPU_LOG_PCALL, "pcall",
"x86 only: show protected mode far calls/returns/exceptions" },
{ CPU_LOG_RESET, "cpu_reset",
- "x86 only: show CPU state before CPU resets" },
+ "show CPU state before CPU resets" },
{ CPU_LOG_IOPORT, "ioport",
"show all i/o ports accesses" },
{ LOG_UNIMP, "unimp",
diff --git a/qemu-nbd.c b/qemu-nbd.c
index d222512412..ac1e5d6f77 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -284,6 +284,7 @@ static void *nbd_client_thread(void *arg)
int fd, sock;
int ret;
pthread_t show_parts_thread;
+ Error *local_error = NULL;
sock = unix_socket_outgoing(sockpath);
if (sock < 0) {
@@ -291,8 +292,12 @@ static void *nbd_client_thread(void *arg)
}
ret = nbd_receive_negotiate(sock, NULL, &nbdflags,
- &size, &blocksize);
+ &size, &blocksize, &local_error);
if (ret < 0) {
+ if (local_error) {
+ fprintf(stderr, "%s\n", error_get_pretty(local_error));
+ error_free(local_error);
+ }
goto out_socket;
}
@@ -386,7 +391,6 @@ int main(int argc, char **argv)
{
BlockBackend *blk;
BlockDriverState *bs;
- BlockDriver *drv;
off_t dev_offset = 0;
uint32_t nbdflags = 0;
bool disconnect = false;
@@ -429,7 +433,7 @@ int main(int argc, char **argv)
char *end;
int flags = BDRV_O_RDWR;
int partition = -1;
- int ret;
+ int ret = 0;
int fd;
bool seen_cache = false;
bool seen_discard = false;
@@ -440,6 +444,7 @@ int main(int argc, char **argv)
const char *fmt = NULL;
Error *local_err = NULL;
BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
+ QDict *options = NULL;
/* The client thread uses SIGTERM to interrupt the server. A signal
* handler ensures that "qemu-nbd -v -c" exits with a nice status code.
@@ -684,24 +689,17 @@ int main(int argc, char **argv)
atexit(bdrv_close_all);
if (fmt) {
- drv = bdrv_find_format(fmt);
- if (!drv) {
- errx(EXIT_FAILURE, "Unknown file format '%s'", fmt);
- }
- } else {
- drv = NULL;
+ options = qdict_new();
+ qdict_put(options, "driver", qstring_from_str(fmt));
}
- blk = blk_new_with_bs("hda", &error_abort);
- bs = blk_bs(blk);
-
srcpath = argv[optind];
- ret = bdrv_open(&bs, srcpath, NULL, NULL, flags, drv, &local_err);
- if (ret < 0) {
- errno = -ret;
- err(EXIT_FAILURE, "Failed to bdrv_open '%s': %s", argv[optind],
- error_get_pretty(local_err));
+ blk = blk_new_open("hda", srcpath, NULL, options, flags, &local_err);
+ if (!blk) {
+ errx(EXIT_FAILURE, "Failed to blk_new_open '%s': %s", argv[optind],
+ error_get_pretty(local_err));
}
+ bs = blk_bs(blk);
if (sn_opts) {
ret = bdrv_snapshot_load_tmp(bs,
diff --git a/qjson.c b/qjson.c
index b242222a58..0cda2690f5 100644
--- a/qjson.c
+++ b/qjson.c
@@ -4,7 +4,7 @@
* Copyright Alexander Graf
*
* Authors:
- * Alexander Graf <agraf@suse.de
+ * Alexander Graf <agraf@suse.de>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
diff --git a/qmp-commands.hx b/qmp-commands.hx
index c5f16dd922..a85d8479e3 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -2146,6 +2146,8 @@ Each json-object contain the following:
- "iops_size": I/O size when limiting by iops (json-int)
- "detect_zeroes": detect and optimize zero writing (json-string)
- Possible values: "off", "on", "unmap"
+ - "write_threshold": write offset threshold in bytes, a event will be
+ emitted if crossed. Zero if disabled (json-int)
- "image": the detail of the image, it is a json-object containing
the following:
- "filename": image file name (json-string)
@@ -2223,6 +2225,7 @@ Example:
"iops_wr_max": 0,
"iops_size": 0,
"detect_zeroes": "on",
+ "write_threshold": 0,
"image":{
"filename":"disks/test.qcow2",
"format":"qcow2",
@@ -2303,6 +2306,10 @@ Each json-object contain the following:
- "flush_total_time_ns": total time spend on cache flushes in nano-seconds (json-int)
- "wr_highest_offset": Highest offset of a sector written since the
BlockDriverState has been opened (json-int)
+ - "rd_merged": number of read requests that have been merged into
+ another request (json-int)
+ - "wr_merged": number of write requests that have been merged into
+ another request (json-int)
- "parent": Contains recursively the statistics of the underlying
protocol (e.g. the host file for a qcow2 image). If there is
no underlying protocol, this field is omitted
@@ -2326,6 +2333,8 @@ Example:
"rd_total_times_ns":3465673657
"flush_total_times_ns":49653
"flush_operations":61,
+ "rd_merged":0,
+ "wr_merged":0
}
},
"stats":{
@@ -2337,7 +2346,9 @@ Example:
"flush_operations":51,
"wr_total_times_ns":313253456
"rd_total_times_ns":3465673657
- "flush_total_times_ns":49653
+ "flush_total_times_ns":49653,
+ "rd_merged":0,
+ "wr_merged":0
}
},
{
@@ -2351,7 +2362,9 @@ Example:
"flush_operations":0,
"wr_total_times_ns":0
"rd_total_times_ns":0
- "flush_total_times_ns":0
+ "flush_total_times_ns":0,
+ "rd_merged":0,
+ "wr_merged":0
}
},
{
@@ -2365,7 +2378,9 @@ Example:
"flush_operations":0,
"wr_total_times_ns":0
"rd_total_times_ns":0
- "flush_total_times_ns":0
+ "flush_total_times_ns":0,
+ "rd_merged":0,
+ "wr_merged":0
}
},
{
@@ -2379,7 +2394,9 @@ Example:
"flush_operations":0,
"wr_total_times_ns":0
"rd_total_times_ns":0
- "flush_total_times_ns":0
+ "flush_total_times_ns":0,
+ "rd_merged":0,
+ "wr_merged":0
}
}
]
@@ -3671,6 +3688,7 @@ Example:
"iops_rd_max": 0,
"iops_wr_max": 0,
"iops_size": 0,
+ "write_threshold": 0,
"image":{
"filename":"disks/test.qcow2",
"format":"qcow2",
@@ -3907,3 +3925,31 @@ Move mouse pointer to absolute coordinates (20000, 400).
<- { "return": {} }
EQMP
+
+ {
+ .name = "block-set-write-threshold",
+ .args_type = "node-name:s,write-threshold:l",
+ .mhandler.cmd_new = qmp_marshal_input_block_set_write_threshold,
+ },
+
+SQMP
+block-set-write-threshold
+-------------------------
+
+Change the write threshold for a block drive. The threshold is an offset
+and thus must be non-negative. No threshold is set by default.
+Setting the threshold to zero disables it.
+
+Arguments:
+
+- "node-name": the node name in the block driver state graph (json-string)
+- "write-threshold": the write threshold in bytes (json-int)
+
+Example:
+
+-> { "execute": "block-set-write-threshold",
+ "arguments": { "node-name": "mydev",
+ "write-threshold": 17179869184 } }
+<- { "return": {} }
+
+EQMP
diff --git a/qmp.c b/qmp.c
index 7f2d25a492..d701cff5ac 100644
--- a/qmp.c
+++ b/qmp.c
@@ -134,6 +134,12 @@ VncInfo *qmp_query_vnc(Error **errp)
error_set(errp, QERR_FEATURE_DISABLED, "vnc");
return NULL;
};
+
+VncInfo2List *qmp_query_vnc_servers(Error **errp)
+{
+ error_set(errp, QERR_FEATURE_DISABLED, "vnc");
+ return NULL;
+};
#endif
#ifndef CONFIG_SPICE
@@ -154,6 +160,7 @@ SpiceInfo *qmp_query_spice(Error **errp)
void qmp_cont(Error **errp)
{
+ Error *local_err = NULL;
BlockDriverState *bs;
if (runstate_needs_reset()) {
@@ -167,10 +174,9 @@ void qmp_cont(Error **errp)
bdrv_iostatus_reset(bs);
}
for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
- if (bdrv_key_required(bs)) {
- error_set(errp, QERR_DEVICE_ENCRYPTED,
- bdrv_get_device_name(bs),
- bdrv_get_encrypted_filename(bs));
+ bdrv_add_key(bs, NULL, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
}
}
@@ -381,6 +387,11 @@ static void qmp_change_vnc_listen(const char *target, Error **errp)
qemu_opts_del(opts);
}
opts = vnc_parse_func(target);
+ if (!opts) {
+ return;
+ }
+
+ vnc_auto_assign_id(olist, opts);
vnc_display_open("default", errp);
}
diff --git a/savevm.c b/savevm.c
index 80407662ad..1d08165c5a 100644
--- a/savevm.c
+++ b/savevm.c
@@ -821,7 +821,7 @@ void qemu_savevm_state_cancel(void)
}
}
-static int qemu_savevm_state(QEMUFile *f)
+static int qemu_savevm_state(QEMUFile *f, Error **errp)
{
int ret;
MigrationParams params = {
@@ -829,7 +829,7 @@ static int qemu_savevm_state(QEMUFile *f)
.shared = 0
};
- if (qemu_savevm_state_blocked(NULL)) {
+ if (qemu_savevm_state_blocked(errp)) {
return -EINVAL;
}
@@ -850,6 +850,7 @@ static int qemu_savevm_state(QEMUFile *f)
}
if (ret != 0) {
qemu_savevm_state_cancel();
+ error_setg_errno(errp, -ret, "Error while writing VM state");
}
return ret;
}
@@ -1102,6 +1103,7 @@ void do_savevm(Monitor *mon, const QDict *qdict)
qemu_timeval tv;
struct tm tm;
const char *name = qdict_get_try_str(qdict, "name");
+ Error *local_err = NULL;
/* Verify if there is a device that doesn't support snapshots and is writable */
bs = NULL;
@@ -1160,11 +1162,12 @@ void do_savevm(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "Could not open VM state file\n");
goto the_end;
}
- ret = qemu_savevm_state(f);
+ ret = qemu_savevm_state(f, &local_err);
vm_state_size = qemu_ftell(f);
qemu_fclose(f);
if (ret < 0) {
- monitor_printf(mon, "Error %d while writing VM\n", ret);
+ monitor_printf(mon, "%s\n", error_get_pretty(local_err));
+ error_free(local_err);
goto the_end;
}
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index b8b9968e00..0c8b22f2aa 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -247,7 +247,7 @@ class HTABSection(object):
if index == 0 and n_valid == 0 and n_invalid == 0:
break
- self.file.readvar(n_valid * HASH_PTE_SIZE_64)
+ self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)
def getDict(self):
return ""
diff --git a/scripts/qtest.py b/scripts/qtest.py
new file mode 100644
index 0000000000..a9714453a2
--- /dev/null
+++ b/scripts/qtest.py
@@ -0,0 +1,71 @@
+# QEMU qtest library
+#
+# Copyright (C) 2015 Red Hat Inc.
+#
+# Authors:
+# Fam Zheng <famz@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# Based on qmp.py.
+#
+
+import errno
+import socket
+
+class QEMUQtestProtocol(object):
+ def __init__(self, address, server=False):
+ """
+ Create a QEMUQtestProtocol object.
+
+ @param address: QEMU address, can be either a unix socket path (string)
+ or a tuple in the form ( address, port ) for a TCP
+ connection
+ @param server: server mode, listens on the socket (bool)
+ @raise socket.error on socket connection errors
+ @note No connection is established, this is done by the connect() or
+ accept() methods
+ """
+ self._address = address
+ self._sock = self._get_sock()
+ if server:
+ self._sock.bind(self._address)
+ self._sock.listen(1)
+
+ def _get_sock(self):
+ if isinstance(self._address, tuple):
+ family = socket.AF_INET
+ else:
+ family = socket.AF_UNIX
+ return socket.socket(family, socket.SOCK_STREAM)
+
+ def connect(self):
+ """
+ Connect to the qtest socket.
+
+ @raise socket.error on socket connection errors
+ """
+ self._sock.connect(self._address)
+
+ def accept(self):
+ """
+ Await connection from QEMU.
+
+ @raise socket.error on socket connection errors
+ """
+ self._sock, _ = self._sock.accept()
+
+ def cmd(self, qtest_cmd):
+ """
+ Send a qtest command on the wire.
+
+ @param qtest_cmd: qtest command text to be sent
+ """
+ self._sock.sendall(qtest_cmd + "\n")
+
+ def close(self):
+ self._sock.close()
+
+ def settimeout(self, timeout):
+ self._sock.settimeout(timeout)
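
The new scripts/qtest.py above only ships the socket wrapper; the patch does not
show how a harness is meant to drive it. A minimal usage sketch, assuming
scripts/ is importable and that QEMU is launched separately with a matching
-qtest unix:... option (the socket path and wire command below are illustrative
only, not taken from the patch):

    # Minimal sketch, not part of the patch: socket path, QEMU command line
    # and the qtest wire command are assumptions for illustration.
    from qtest import QEMUQtestProtocol

    SOCK = "/tmp/qtest.sock"

    qtest = QEMUQtestProtocol(SOCK, server=True)  # listen before QEMU starts
    # launch QEMU separately, e.g. with: -qtest unix:/tmp/qtest.sock
    qtest.accept()                      # wait for QEMU to connect back
    qtest.settimeout(5)                 # do not hang on a wedged guest
    qtest.cmd("outl 0xcf8 0x80000000")  # one qtest wire command per line
    qtest.close()
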
diff --git a/spice-qemu-char.c b/spice-qemu-char.c
index 7e0d300777..a4f4e578fe 100644
--- a/spice-qemu-char.c
+++ b/spice-qemu-char.c
@@ -158,7 +158,7 @@ static gboolean spice_char_source_dispatch(GSource *source,
return func(NULL, G_IO_OUT, user_data);
}
-GSourceFuncs SpiceCharSourceFuncs = {
+static GSourceFuncs SpiceCharSourceFuncs = {
.prepare = spice_char_source_prepare,
.check = spice_char_source_check,
.dispatch = spice_char_source_dispatch,
diff --git a/stubs/qtest.c b/stubs/qtest.c
index e671ed8e8e..dc17594bb6 100644
--- a/stubs/qtest.c
+++ b/stubs/qtest.c
@@ -8,7 +8,7 @@
* See the COPYING file in the top-level directory.
*/
-#include "qemu-common.h"
+#include "sysemu/qtest.h"
/* Needed for qtest_allowed() */
bool qtest_allowed;
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index f8883672a8..9c77d46bda 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2790,7 +2790,6 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
target_ulong pc_start;
target_ulong pc_mask;
uint32_t insn;
- uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj = -1;
ExitStatus ret;
@@ -2798,7 +2797,6 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
int max_insns;
pc_start = tb->pc;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
ctx.tb = tb;
ctx.pc = pc_start;
@@ -2839,11 +2837,12 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
- while (lj < j)
+ while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
+ }
}
tcg_ctx.gen_opc_pc[lj] = ctx.pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
@@ -2881,7 +2880,7 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
or exhaust instruction count, stop generation. */
if (ret == NO_EXIT
&& ((ctx.pc & pc_mask) == 0
- || tcg_ctx.gen_opc_ptr >= gen_opc_end
+ || tcg_op_buf_full()
|| num_insns >= max_insns
|| singlestep
|| ctx.singlestep_enabled)) {
@@ -2912,12 +2911,13 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
- while (lj <= j)
+ while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
+ }
} else {
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index d38af747ac..986f04cfd6 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -544,13 +544,16 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
ObjectClass *oc;
char *typename;
+ char **cpuname;
if (!cpu_model) {
return NULL;
}
- typename = g_strdup_printf("%s-" TYPE_ARM_CPU, cpu_model);
+ cpuname = g_strsplit(cpu_model, ",", 1);
+ typename = g_strdup_printf("%s-" TYPE_ARM_CPU, cpuname[0]);
oc = object_class_by_name(typename);
+ g_strfreev(cpuname);
g_free(typename);
if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
object_class_is_abstract(oc)) {
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 1830a12d4a..11845a6644 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -495,6 +495,8 @@ typedef struct CPUARMState {
ARMCPU *cpu_arm_init(const char *cpu_model);
int cpu_arm_exec(CPUARMState *s);
uint32_t do_arm_semihosting(CPUARMState *env);
+void aarch64_sync_32_to_64(CPUARMState *env);
+void aarch64_sync_64_to_32(CPUARMState *env);
static inline bool is_a64(CPUARMState *env)
{
diff --git a/target-arm/cpu64.c b/target-arm/cpu64.c
index bb778b3d92..823c739f08 100644
--- a/target-arm/cpu64.c
+++ b/target-arm/cpu64.c
@@ -32,6 +32,11 @@ static inline void set_feature(CPUARMState *env, int feature)
env->features |= 1ULL << feature;
}
+static inline void unset_feature(CPUARMState *env, int feature)
+{
+ env->features &= ~(1ULL << feature);
+}
+
#ifndef CONFIG_USER_ONLY
static uint64_t a57_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
@@ -170,8 +175,42 @@ static const ARMCPUInfo aarch64_cpus[] = {
{ .name = NULL }
};
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /* At this time, this property is only allowed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state like do_interrupt.
+ */
+ if (!kvm_enabled()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled");
+ return;
+ }
+
+ if (value == false) {
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
static void aarch64_cpu_initfn(Object *obj)
{
+ object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64, NULL);
+ object_property_set_description(obj, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ",
+ NULL);
}
static void aarch64_cpu_finalizefn(Object *obj)
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index 8aa40e9763..7e0d038563 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -466,7 +466,6 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
unsigned int new_el = arm_excp_target_el(cs, cs->exception_index);
target_ulong addr = env->cp15.vbar_el[new_el];
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
- int i;
if (arm_current_el(env) < new_el) {
if (env->aarch64) {
@@ -530,9 +529,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
}
env->elr_el[new_el] = env->regs[15];
- for (i = 0; i < 15; i++) {
- env->xregs[i] = env->regs[i];
- }
+ aarch64_sync_32_to_64(env);
env->condexec_bits = 0;
}
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 1a1a00577e..3bc20af04f 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -4096,6 +4096,11 @@ unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
return 1;
}
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ g_assert_not_reached();
+}
+
#else
/* Map CPU modes onto saved register banks. */
@@ -4425,6 +4430,212 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
env->thumb = addr & 1;
}
+/* Function used to synchronize QEMU's AArch64 register set with AArch32
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_32_to_64(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy R[0:7] to X[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+
+ /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
+ * Otherwise, they come from the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->usr_regs[i - 8];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+ }
+
+ /* Registers x13-x23 are the various mode SP and LR registers. Registers
+ * r13 and r14 are only copied if we are in that mode, otherwise we copy
+ * from the mode banked register.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->xregs[13] = env->regs[13];
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
+ /* HYP is an exception in that it is copied from r14 */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[15] = env->regs[13];
+ } else {
+ env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->xregs[16] = env->regs[13];
+ env->xregs[17] = env->regs[14];
+ } else {
+ env->xregs[16] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
+ env->xregs[17] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->xregs[18] = env->regs[13];
+ env->xregs[19] = env->regs[14];
+ } else {
+ env->xregs[18] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
+ env->xregs[19] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->xregs[20] = env->regs[13];
+ env->xregs[21] = env->regs[14];
+ } else {
+ env->xregs[20] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
+ env->xregs[21] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->xregs[22] = env->regs[13];
+ env->xregs[23] = env->regs[14];
+ } else {
+ env->xregs[22] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
+ env->xregs[23] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
+ }
+
+ /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy from r8-r14. Otherwise, we copy from the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->xregs[i] = env->fiq_regs[i - 24];
+ }
+ env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
+ env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
+ }
+
+ env->pc = env->regs[15];
+}
+
+/* Function used to synchronize QEMU's AArch32 register set with AArch64
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy X[0:7] to R[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+
+ /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
+ * Otherwise, we copy x8-x12 into the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->usr_regs[i - 8] = env->xregs[i];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+ }
+
+ /* Registers r13 & r14 depend on the current mode.
+ * If we are in a given mode, we copy the corresponding x registers to r13
+ * and r14. Otherwise, we copy the x register to the banked r13 and r14
+ * for the mode.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->regs[13] = env->xregs[13];
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
+
+ /* HYP is an exception in that it does not have its own banked r14 but
+ * shares the USR r14
+ */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[13] = env->xregs[15];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->regs[13] = env->xregs[16];
+ env->regs[14] = env->xregs[17];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
+ env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->regs[13] = env->xregs[18];
+ env->regs[14] = env->xregs[19];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
+ env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->regs[13] = env->xregs[20];
+ env->regs[14] = env->xregs[21];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
+ env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->regs[13] = env->xregs[22];
+ env->regs[14] = env->xregs[23];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
+ env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
+ }
+
+ /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy to r8-r14. Otherwise, we copy to the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->fiq_regs[i - 24] = env->xregs[i];
+ }
+ env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
+ env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
+ }
+
+ env->regs[15] = env->pc;
+}
+
/* Handle a CPU exception. */
void arm_cpu_do_interrupt(CPUState *cs)
{
diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c
index 033babf551..8cf3a627ec 100644
--- a/target-arm/kvm64.c
+++ b/target-arm/kvm64.c
@@ -82,7 +82,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
- !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
fprintf(stderr, "KVM is not supported for this guest CPU type\n");
return -EINVAL;
}
@@ -96,6 +96,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
cpu->psci_version = 2;
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
}
+ if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
+ }
/* Do KVM_ARM_VCPU_INIT ioctl */
ret = kvm_arm_vcpu_init(cs);
@@ -133,6 +136,13 @@ int kvm_arch_put_registers(CPUState *cs, int level)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+ /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
+ * AArch64 registers before pushing them out to 64-bit KVM.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ }
+
for (i = 0; i < 31; i++) {
reg.id = AARCH64_CORE_REG(regs.regs[i]);
reg.addr = (uintptr_t) &env->xregs[i];
@@ -162,7 +172,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
/* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
- val = pstate_read(env);
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
reg.id = AARCH64_CORE_REG(regs.pstate);
reg.addr = (uintptr_t) &val;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
@@ -242,7 +256,14 @@ int kvm_arch_get_registers(CPUState *cs)
if (ret) {
return ret;
}
- pstate_write(env, val);
+
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ } else {
+ env->uncached_cpsr = val & CPSR_M;
+ cpsr_write(env, val, 0xffffffff);
+ }
/* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
* QEMU side we keep the current SP in xregs[31] as well.
@@ -256,6 +277,15 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
+ /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
+ * incoming AArch64 regs received from 64-bit KVM.
+ * We must perform this after all of the registers have been acquired from
+ * the kernel.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_64_to_32(env);
+ }
+
reg.id = AARCH64_CORE_REG(elr_el1);
reg.addr = (uintptr_t) &env->elr_el[1];
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 2bed914770..7713022752 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -465,7 +465,7 @@ void HELPER(exception_return)(CPUARMState *env)
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
- int new_el, i;
+ int new_el;
aarch64_save_sp(env, cur_el);
@@ -491,9 +491,7 @@ void HELPER(exception_return)(CPUARMState *env)
if (!arm_singlestep_active(env)) {
env->uncached_cpsr &= ~PSTATE_SS;
}
- for (i = 0; i < 15; i++) {
- env->regs[i] = env->xregs[i];
- }
+ aarch64_sync_64_to_32(env);
env->regs[15] = env->elr_el[1] & ~0x1;
} else {
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index acf4b162bd..34b489f2ec 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -1077,7 +1077,7 @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
- if (insn & (1 << 31)) {
+ if (insn & (1U << 31)) {
/* C5.6.26 BL Branch with link */
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
}
@@ -1271,7 +1271,7 @@ static void gen_get_nzcv(TCGv_i64 tcg_rt)
TCGv_i32 nzcv = tcg_temp_new_i32();
/* build bit 31, N */
- tcg_gen_andi_i32(nzcv, cpu_NF, (1 << 31));
+ tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
/* build bit 30, Z */
tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
@@ -1296,7 +1296,7 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
/* bit 31, N */
- tcg_gen_andi_i32(cpu_NF, nzcv, (1 << 31));
+ tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
/* bit 30, Z */
tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
@@ -1917,7 +1917,7 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int rt2 = extract32(insn, 10, 5);
- int64_t offset = sextract32(insn, 15, 7);
+ uint64_t offset = sextract64(insn, 15, 7);
int index = extract32(insn, 23, 2);
bool is_vector = extract32(insn, 26, 1);
bool is_load = extract32(insn, 22, 1);
@@ -2662,11 +2662,12 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
unsigned int page, rd;
uint64_t base;
- int64_t offset;
+ uint64_t offset;
page = extract32(insn, 31, 1);
/* SignExtend(immhi:immlo) -> offset */
- offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
+ offset = sextract64(insn, 5, 19);
+ offset = offset << 2 | extract32(insn, 29, 2);
rd = extract32(insn, 0, 5);
base = s->pc - 4;
@@ -2820,7 +2821,10 @@ static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
* by r within the element (which is e bits wide)...
*/
mask = bitmask64(s + 1);
- mask = (mask >> r) | (mask << (e - r));
+ if (r) {
+ mask = (mask >> r) | (mask << (e - r));
+ mask &= bitmask64(e);
+ }
/* ...then replicate the element over the whole 64 bit value */
mask = bitfield_replicate(mask, e);
*result = mask;
@@ -10916,7 +10920,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
@@ -10927,8 +10930,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
@@ -10998,7 +10999,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -11048,7 +11049,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
* ensures prefetch aborts occur at the right place.
*/
num_insns++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
!dc->ss_active &&
@@ -11108,7 +11109,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -11120,7 +11120,7 @@ done_generating:
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
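The (1 << 31) -> (1U << 31) changes in this file avoid undefined behaviour: 1 is a signed int, and shifting a set bit into its sign bit overflows it. A standalone illustration (not QEMU code):

#include <stdint.h>

/* Bit-31 mask: the unsigned form is well defined, the signed form is not. */
uint32_t sign_bit_mask(void)
{
    return 1U << 31;       /* OK: unsigned shift */
    /* return 1 << 31;        undefined: overflows a signed int */
}

The guard added to logic_imm_decode_wmask() fixes a related problem: with r == 0 the old expression shifted by e, which is undefined when e is 64 and in any case leaves stray bits above the field, while rotating by zero should simply be the identity. A sketch of the guarded rotate within an e-bit field, assuming bitmask64(e) returns the e low bits set (as it is used elsewhere in this function):

static uint64_t ror_within_field(uint64_t mask, unsigned int e, unsigned int r)
{
    if (r) {
        mask = (mask >> r) | (mask << (e - r));   /* rotate right by r */
        mask &= bitmask64(e);                     /* drop bits rotated above the field */
    }
    return mask;
}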
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 1c36b8b05e..36868ed05f 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -11025,7 +11025,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
@@ -11046,8 +11045,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
@@ -11182,7 +11179,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -11248,7 +11245,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
!dc->ss_active &&
@@ -11357,7 +11354,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -11369,7 +11365,7 @@ done_generating:
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
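These per-target conversions (here and in the files below) all rely on two small helpers that this series adds to tcg/tcg.h in place of the old gen_opc_ptr arithmetic. They are not shown in this diff; based on the TCGContext fields used by tcg/optimize.c and tcg/tcg-op.c further down, they are presumably along these lines:

/* Sketch only; the real definitions live in tcg/tcg.h. */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx.gen_next_op_idx;              /* number of ops emitted so far */
}

static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;   /* stop translating before the buffer overflows */
}

The switch from the %td to the %d format specifier in the qemu_log() calls follows from tcg_op_buf_count() returning an int rather than a pointer difference.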
diff --git a/target-cris/translate.c b/target-cris/translate.c
index b675ed0b18..47abcefaae 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -3116,7 +3116,6 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
{
CPUState *cs = CPU(cpu);
CPUCRISState *env = &cpu->env;
- uint16_t *gen_opc_end;
uint32_t pc_start;
unsigned int insn_len;
int j, lj;
@@ -3142,8 +3141,6 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
dc->cpu = cpu;
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->ppc = pc_start;
dc->pc = pc_start;
@@ -3207,7 +3204,7 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
check_breakpoint(env, dc);
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -3291,7 +3288,7 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
break;
}
} while (!dc->is_jmp && !dc->cpustate_changed
- && tcg_ctx.gen_opc_ptr < gen_opc_end
+ && !tcg_op_buf_full()
&& !singlestep
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
@@ -3344,9 +3341,9 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
}
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
@@ -3361,8 +3358,8 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
log_target_disas(env, pc_start, dc->pc - pc_start,
env->pregs[PR_VR]);
- qemu_log("\nisize=%d osize=%td\n",
- dc->pc - pc_start, tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
}
#endif
#endif
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 9ebdf4b384..094cec059c 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7913,7 +7913,6 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
CPUX86State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
- uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj;
uint64_t flags;
@@ -7993,8 +7992,6 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
cpu_ptr1 = tcg_temp_new_ptr();
cpu_cc_srcT = tcg_temp_local_new();
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
lj = -1;
@@ -8015,7 +8012,7 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -8060,7 +8057,7 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
break;
}
/* if too long translation, stop generation too */
- if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
+ if (tcg_op_buf_full() ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
num_insns >= max_insns) {
gen_jmp_im(pc_ptr - dc->cs_base);
@@ -8077,10 +8074,10 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
gen_io_end();
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
/* we don't forget to fill the last values */
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-lm32/translate.c b/target-lm32/translate.c
index a7579dc8be..9d087b938d 100644
--- a/target-lm32/translate.c
+++ b/target-lm32/translate.c
@@ -1062,7 +1062,6 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
CPUState *cs = CPU(cpu);
CPULM32State *env = &cpu->env;
struct DisasContext ctx, *dc = &ctx;
- uint16_t *gen_opc_end;
uint32_t pc_start;
int j, lj;
uint32_t next_page_start;
@@ -1075,8 +1074,6 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
dc->num_watchpoints = cpu->num_watchpoints;
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
@@ -1100,7 +1097,7 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
check_breakpoint(env, dc);
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -1124,7 +1121,7 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
num_insns++;
} while (!dc->is_jmp
- && tcg_ctx.gen_opc_ptr < gen_opc_end
+ && !tcg_op_buf_full()
&& !cs->singlestep_enabled
&& !singlestep
&& (dc->pc < next_page_start)
@@ -1158,9 +1155,9 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
@@ -1174,9 +1171,8 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("\n");
log_target_disas(env, pc_start, dc->pc - pc_start, 0);
- qemu_log("\nisize=%d osize=%td\n",
- dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
- tcg_ctx.gen_opc_buf);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
}
#endif
}
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index 47edc7ae51..a39b49515d 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -2980,7 +2980,6 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
CPUState *cs = CPU(cpu);
CPUM68KState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
- uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj;
target_ulong pc_start;
@@ -2993,8 +2992,6 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->env = env;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
@@ -3026,7 +3023,7 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
break;
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -3041,7 +3038,7 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
dc->insn_pc = dc->pc;
disas_m68k_insn(env, dc);
num_insns++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
(pc_offset) < (TARGET_PAGE_SIZE - 32) &&
@@ -3075,7 +3072,6 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
}
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -3086,7 +3082,7 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index 69ce4df4a3..5ff3833010 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -1673,7 +1673,6 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
{
CPUState *cs = CPU(cpu);
CPUMBState *env = &cpu->env;
- uint16_t *gen_opc_end;
uint32_t pc_start;
int j, lj;
struct DisasContext ctx;
@@ -1688,8 +1687,6 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
dc->tb = tb;
org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->jmp = 0;
dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
@@ -1732,7 +1729,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
check_breakpoint(env, dc);
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -1795,10 +1792,10 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
break;
}
} while (!dc->is_jmp && !dc->cpustate_changed
- && tcg_ctx.gen_opc_ptr < gen_opc_end
- && !singlestep
- && (dc->pc < next_page_start)
- && num_insns < max_insns);
+ && !tcg_op_buf_full()
+ && !singlestep
+ && (dc->pc < next_page_start)
+ && num_insns < max_insns);
npc = dc->pc;
if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
@@ -1846,9 +1843,9 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
}
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
@@ -1864,9 +1861,8 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
#if DISAS_GNU
log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
- qemu_log("\nisize=%d osize=%td\n",
- dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
- tcg_ctx.gen_opc_buf);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
}
#endif
#endif
diff --git a/target-mips/machine.c b/target-mips/machine.c
index 0ba7d736db..6c76dfbe03 100644
--- a/target-mips/machine.c
+++ b/target-mips/machine.c
@@ -285,6 +285,10 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_sbe32s(f, &env->CP0_SRSConf4);
qemu_get_sbe32s(f, &env->CP0_HWREna);
qemu_get_betls(f, &env->CP0_BadVAddr);
+ if (version_id >= 5) {
+ qemu_get_be32s(f, &env->CP0_BadInstr);
+ qemu_get_be32s(f, &env->CP0_BadInstrP);
+ }
qemu_get_sbe32s(f, &env->CP0_Count);
qemu_get_betls(f, &env->CP0_EntryHi);
qemu_get_sbe32s(f, &env->CP0_Compare);
@@ -319,8 +323,6 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_betls(f, &env->CP0_ErrorEPC);
qemu_get_sbe32s(f, &env->CP0_DESAVE);
if (version_id >= 5) {
- qemu_get_be32s(f, &env->CP0_BadInstr);
- qemu_get_be32s(f, &env->CP0_BadInstrP);
for (i = 0; i < MIPS_KSCRATCH_NUM; i++) {
qemu_get_betls(f, &env->CP0_KScratch[i]);
}
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index ea7d95f36c..73a8e458fc 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -304,16 +304,20 @@ static inline hwaddr do_translate_address(CPUMIPSState *env,
}
}
-#define HELPER_LD_ATOMIC(name, insn) \
+#define HELPER_LD_ATOMIC(name, insn, almask) \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
{ \
+ if (arg & almask) { \
+ env->CP0_BadVAddr = arg; \
+ helper_raise_exception(env, EXCP_AdEL); \
+ } \
env->lladdr = do_translate_address(env, arg, 0); \
env->llval = do_##insn(env, arg, mem_idx); \
return env->llval; \
}
-HELPER_LD_ATOMIC(ll, lw)
+HELPER_LD_ATOMIC(ll, lw, 0x3)
#ifdef TARGET_MIPS64
-HELPER_LD_ATOMIC(lld, ld)
+HELPER_LD_ATOMIC(lld, ld, 0x7)
#endif
#undef HELPER_LD_ATOMIC
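With the extra almask parameter, the LL/LLD helpers now raise an address-error exception for misaligned addresses before translating or touching memory. Roughly what HELPER_LD_ATOMIC(ll, lw, 0x3) expands to after this change (illustrative expansion, not literal preprocessor output):

target_ulong helper_ll(CPUMIPSState *env, target_ulong arg, int mem_idx)
{
    if (arg & 0x3) {                      /* LL requires a word-aligned address */
        env->CP0_BadVAddr = arg;          /* record the faulting address */
        helper_raise_exception(env, EXCP_AdEL);
    }
    env->lladdr = do_translate_address(env, arg, 0);
    env->llval = do_lw(env, arg, mem_idx);
    return env->llval;
}

The 64-bit variant is identical apart from the 0x7 alignment mask and do_ld.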
diff --git a/target-mips/translate.c b/target-mips/translate.c
index e9d86b2364..ca51149872 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -4947,7 +4947,7 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
#if defined(TARGET_MIPS64)
if (ctx->rxi) {
TCGv tmp = tcg_temp_new();
- tcg_gen_andi_tl(tmp, arg, (3ull << 62));
+ tcg_gen_andi_tl(tmp, arg, (3ull << CP0EnLo_XI));
tcg_gen_shri_tl(tmp, tmp, 32);
tcg_gen_or_tl(arg, arg, tmp);
tcg_temp_free(tmp);
@@ -5002,7 +5002,7 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
#if defined(TARGET_MIPS64)
if (ctx->rxi) {
TCGv tmp = tcg_temp_new();
- tcg_gen_andi_tl(tmp, arg, (3ull << 62));
+ tcg_gen_andi_tl(tmp, arg, (3ull << CP0EnLo_XI));
tcg_gen_shri_tl(tmp, tmp, 32);
tcg_gen_or_tl(arg, arg, tmp);
tcg_temp_free(tmp);
@@ -13653,7 +13653,7 @@ static void decode_micromips32_opc (CPUMIPSState *env, DisasContext *ctx,
target. */
break;
case LUI:
- gen_logic_imm(ctx, OPC_LUI, rs, -1, imm);
+ gen_logic_imm(ctx, OPC_LUI, rs, 0, imm);
break;
case SYNCI:
/* Break the TB to be able to sync copied instructions
@@ -18729,6 +18729,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_SWL:
case OPC_SWR:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* fall through */
case OPC_SB ... OPC_SH:
case OPC_SW:
gen_st(ctx, op, rt, rs, imm);
@@ -18817,6 +18818,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_PS_FMT:
check_cp1_enabled(ctx);
check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* fall through */
case OPC_S_FMT:
case OPC_D_FMT:
check_cp1_enabled(ctx);
@@ -19000,6 +19002,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_LDL ... OPC_LDR:
case OPC_LLD:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* fall through */
case OPC_LWU:
case OPC_LD:
check_insn(ctx, ISA_MIPS3);
@@ -19008,6 +19011,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
break;
case OPC_SDL ... OPC_SDR:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* fall through */
case OPC_SD:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
@@ -19091,7 +19095,7 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
CPUMIPSState *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
- uint16_t *gen_opc_end;
+ target_ulong next_page_start;
CPUBreakpoint *bp;
int j, lj = -1;
int num_insns;
@@ -19103,7 +19107,7 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
qemu_log("search pc %d\n", search_pc);
pc_start = tb->pc;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
ctx.pc = pc_start;
ctx.saved_pc = -1;
ctx.singlestep_enabled = cs->singlestep_enabled;
@@ -19147,7 +19151,7 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -19202,10 +19206,11 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
break;
}
- if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
+ if (ctx.pc >= next_page_start) {
break;
+ }
- if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
+ if (tcg_op_buf_full()) {
break;
}
@@ -19240,9 +19245,9 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
}
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c
index 1543f6c388..9e8433a919 100644
--- a/target-mips/translate_init.c
+++ b/target-mips/translate_init.c
@@ -474,7 +474,7 @@ static const mips_def_t mips_defs[] =
.CP0_LLAddr_shift = 4,
.SYNCI_Step = 32,
.CCRes = 2,
- .CP0_Status_rw_bitmask = 0x32F8FFFF,
+ .CP0_Status_rw_bitmask = 0x12F8FFFF,
.SEGBITS = 42,
.PABITS = 36,
.insn_flags = CPU_MIPS64,
@@ -575,7 +575,7 @@ static const mips_def_t mips_defs[] =
.CP0_LLAddr_shift = 4,
.SYNCI_Step = 32,
.CCRes = 2,
- .CP0_Status_rw_bitmask = 0x32F8FFFF,
+ .CP0_Status_rw_bitmask = 0x12F8FFFF,
.SEGBITS = 42,
.PABITS = 36,
.insn_flags = CPU_MIPS64R2,
diff --git a/target-moxie/machine.c b/target-moxie/machine.c
index da1a8572a8..b9316f0ec3 100644
--- a/target-moxie/machine.c
+++ b/target-moxie/machine.c
@@ -1,5 +1,6 @@
#include "hw/hw.h"
#include "hw/boards.h"
+#include "machine.h"
const VMStateDescription vmstate_moxie_cpu = {
.name = "cpu",
diff --git a/target-moxie/translate.c b/target-moxie/translate.c
index 564f3eedaf..c416eca44b 100644
--- a/target-moxie/translate.c
+++ b/target-moxie/translate.c
@@ -827,14 +827,12 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
CPUState *cs = CPU(cpu);
DisasContext ctx;
target_ulong pc_start;
- uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj = -1;
CPUMoxieState *env = &cpu->env;
int num_insns;
pc_start = tb->pc;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
ctx.pc = pc_start;
ctx.saved_pc = -1;
ctx.tb = tb;
@@ -857,7 +855,7 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -879,7 +877,7 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) {
break;
}
- } while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end);
+ } while (ctx.bstate == BS_NONE && !tcg_op_buf_full());
if (cs->singlestep_enabled) {
tcg_gen_movi_tl(cpu_pc, ctx.pc);
@@ -900,9 +898,9 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
}
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index b90181dc70..6ef447b77b 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -1642,7 +1642,6 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
{
CPUState *cs = CPU(cpu);
struct DisasContext ctx, *dc = &ctx;
- uint16_t *gen_opc_end;
uint32_t pc_start;
int j, k;
uint32_t next_page_start;
@@ -1652,7 +1651,6 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
pc_start = tb->pc;
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->is_jmp = DISAS_NEXT;
dc->ppc = pc_start;
dc->pc = pc_start;
@@ -1680,7 +1678,7 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
do {
check_breakpoint(cpu, dc);
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (k < j) {
k++;
while (k < j) {
@@ -1721,7 +1719,7 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
}
}
} while (!dc->is_jmp
- && tcg_ctx.gen_opc_ptr < gen_opc_end
+ && !tcg_op_buf_full()
&& !cs->singlestep_enabled
&& !singlestep
&& (dc->pc < next_page_start)
@@ -1759,9 +1757,9 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
k++;
while (k <= j) {
tcg_ctx.gen_opc_instr_start[k++] = 0;
@@ -1775,9 +1773,8 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("\n");
log_target_disas(&cpu->env, pc_start, dc->pc - pc_start, 0);
- qemu_log("\nisize=%d osize=%td\n",
- dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
- tcg_ctx.gen_opc_buf);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
}
#endif
}
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 7c801f36e3..88c18e37b3 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -11415,14 +11415,12 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
DisasContext ctx, *ctxp = &ctx;
opc_handler_t **table, *handler;
target_ulong pc_start;
- uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj = -1;
int num_insns;
int max_insns;
pc_start = tb->pc;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
ctx.nip = pc_start;
ctx.tb = tb;
ctx.exception = POWERPC_EXCP_NONE;
@@ -11481,8 +11479,7 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
gen_tb_start(tb);
tcg_clear_temp_count();
/* Set env in case of segfault during code fetch */
- while (ctx.exception == POWERPC_EXCP_NONE
- && tcg_ctx.gen_opc_ptr < gen_opc_end) {
+ while (ctx.exception == POWERPC_EXCP_NONE && !tcg_op_buf_full()) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.nip) {
@@ -11492,7 +11489,7 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
}
}
if (unlikely(search_pc)) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -11598,9 +11595,9 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
tcg_gen_exit_tb(0);
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (unlikely(search_pc)) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index 8b36eca718..c73ea61b16 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -4832,7 +4832,6 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
DisasContext dc;
target_ulong pc_start;
uint64_t next_page_start;
- uint16_t *gen_opc_end;
int j, lj = -1;
int num_insns, max_insns;
CPUBreakpoint *bp;
@@ -4851,8 +4850,6 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
dc.cc_op = CC_OP_DYNAMIC;
do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
@@ -4865,7 +4862,7 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
do {
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -4903,7 +4900,7 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
or exhaust instruction count, stop generation. */
if (status == NO_EXIT
&& (dc.pc >= next_page_start
- || tcg_ctx.gen_opc_ptr >= gen_opc_end
+ || tcg_op_buf_full()
|| num_insns >= max_insns
|| singlestep
|| cs->singlestep_enabled)) {
@@ -4938,9 +4935,9 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 7010740b21..4c95ac7a83 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -1865,14 +1865,12 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
CPUSH4State *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
- static uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int i, ii;
int num_insns;
int max_insns;
pc_start = tb->pc;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
ctx.pc = pc_start;
ctx.flags = (uint32_t)tb->flags;
ctx.bstate = BS_NONE;
@@ -1891,7 +1889,7 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
if (max_insns == 0)
max_insns = CF_COUNT_MASK;
gen_tb_start(tb);
- while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
+ while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (ctx.pc == bp->pc) {
@@ -1904,7 +1902,7 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
}
}
if (search_pc) {
- i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ i = tcg_op_buf_count();
if (ii < i) {
ii++;
while (ii < i)
@@ -1962,9 +1960,9 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ i = tcg_op_buf_count();
ii++;
while (ii <= i)
tcg_ctx.gen_opc_instr_start[ii++] = 0;
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 68527d595f..bd53950078 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -5223,7 +5223,6 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
CPUState *cs = CPU(cpu);
CPUSPARCState *env = &cpu->env;
target_ulong pc_start, last_pc;
- uint16_t *gen_opc_end;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
int j, lj = -1;
@@ -5243,7 +5242,6 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
dc->fpu_enabled = tb_fpu_enabled(tb->flags);
dc->address_mask_32bit = tb_am_enabled(tb->flags);
dc->singlestep = (cs->singlestep_enabled || singlestep);
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
@@ -5265,7 +5263,7 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
}
if (spc) {
qemu_log("Search PC...\n");
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -5298,7 +5296,7 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
if (dc->singlestep) {
break;
}
- } while ((tcg_ctx.gen_opc_ptr < gen_opc_end) &&
+ } while (!tcg_op_buf_full() &&
(dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
num_insns < max_insns);
@@ -5320,9 +5318,9 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
}
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (spc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index a73b7000b4..996435dbd0 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -5500,7 +5500,6 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
DisasContext ctx;
target_ulong pc_start;
int num_insns;
- uint16_t *gen_opc_end;
if (search_pc) {
qemu_log("search pc %d\n", search_pc);
@@ -5508,7 +5507,6 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
num_insns = 0;
pc_start = tb->pc;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
ctx.pc = pc_start;
ctx.saved_pc = -1;
ctx.tb = tb;
@@ -5524,7 +5522,7 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
num_insns++;
- if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
+ if (tcg_op_buf_full()) {
gen_save_pc(ctx.next_pc);
tcg_gen_exit_tb(0);
break;
@@ -5538,7 +5536,6 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
if (search_pc) {
printf("done_generating search pc\n");
} else {
diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c
index ab7e96f937..db453ef95c 100644
--- a/target-unicore32/translate.c
+++ b/target-unicore32/translate.c
@@ -1877,7 +1877,6 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
CPUUniCore32State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
uint32_t next_page_start;
@@ -1891,8 +1890,6 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
@@ -1933,7 +1930,7 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -1965,7 +1962,7 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
dc->pc < next_page_start &&
@@ -2037,7 +2034,6 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -2048,7 +2044,7 @@ done_generating:
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index 9e137fe5ec..f112e2ac44 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -3021,7 +3021,6 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
DisasContext dc;
int insn_count = 0;
int j, lj = -1;
- uint16_t *gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
int max_insns = tb->cflags & CF_COUNT_MASK;
uint32_t pc_start = tb->pc;
uint32_t next_page_start =
@@ -3065,7 +3064,7 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
check_breakpoint(env, &dc);
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -3117,7 +3116,7 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
insn_count < max_insns &&
dc.pc < next_page_start &&
dc.pc + xtensa_insn_len(env, &dc) <= next_page_start &&
- tcg_ctx.gen_opc_ptr < gen_opc_end);
+ !tcg_op_buf_full());
reset_litbase(&dc);
reset_sar_tracker(&dc);
@@ -3133,7 +3132,6 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
gen_jumpi(&dc, dc.pc, 0);
}
gen_tb_end(tb, insn_count);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -3144,7 +3142,7 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
memset(tcg_ctx.gen_opc_instr_start + lj + 1, 0,
(j - lj) * sizeof(tcg_ctx.gen_opc_instr_start[0]));
} else {
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 34ae3c2857..067917c396 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -67,6 +67,37 @@ static void reset_temp(TCGArg temp)
temps[temp].mask = -1;
}
+static TCGOp *insert_op_before(TCGContext *s, TCGOp *old_op,
+ TCGOpcode opc, int nargs)
+{
+ int oi = s->gen_next_op_idx;
+ int pi = s->gen_next_parm_idx;
+ int prev = old_op->prev;
+ int next = old_op - s->gen_op_buf;
+ TCGOp *new_op;
+
+ tcg_debug_assert(oi < OPC_BUF_SIZE);
+ tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
+ s->gen_next_op_idx = oi + 1;
+ s->gen_next_parm_idx = pi + nargs;
+
+ new_op = &s->gen_op_buf[oi];
+ *new_op = (TCGOp){
+ .opc = opc,
+ .args = pi,
+ .prev = prev,
+ .next = next
+ };
+ if (prev >= 0) {
+ s->gen_op_buf[prev].next = oi;
+ } else {
+ s->gen_first_op_idx = oi;
+ }
+ old_op->prev = oi;
+
+ return new_op;
+}
+
/* Reset all temporaries, given that there are NB_TEMPS of them. */
static void reset_all_temps(int nb_temps)
{
@@ -162,13 +193,13 @@ static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
return false;
}
-static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
+static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args,
TCGOpcode old_op, TCGArg dst, TCGArg src)
{
TCGOpcode new_op = op_to_mov(old_op);
tcg_target_ulong mask;
- s->gen_opc_buf[op_index] = new_op;
+ op->opc = new_op;
reset_temp(dst);
mask = temps[src].mask;
@@ -193,17 +224,17 @@ static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
temps[src].next_copy = dst;
}
- gen_args[0] = dst;
- gen_args[1] = src;
+ args[0] = dst;
+ args[1] = src;
}
-static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
+static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg *args,
TCGOpcode old_op, TCGArg dst, TCGArg val)
{
TCGOpcode new_op = op_to_movi(old_op);
tcg_target_ulong mask;
- s->gen_opc_buf[op_index] = new_op;
+ op->opc = new_op;
reset_temp(dst);
temps[dst].state = TCG_TEMP_CONST;
@@ -215,8 +246,8 @@ static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
}
temps[dst].mask = mask;
- gen_args[0] = dst;
- gen_args[1] = val;
+ args[0] = dst;
+ args[1] = val;
}
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
@@ -533,11 +564,9 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
}
/* Propagate constants and copies, fold constant expressions. */
-static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
- TCGArg *args, TCGOpDef *tcg_op_defs)
+static void tcg_constant_folding(TCGContext *s)
{
- int nb_ops, op_index, nb_temps, nb_globals;
- TCGArg *gen_args;
+ int oi, oi_next, nb_temps, nb_globals;
/* Array VALS has an element for each temp.
If this temp holds a constant then its value is kept in VALS' element.
@@ -548,24 +577,23 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
nb_globals = s->nb_globals;
reset_all_temps(nb_temps);
- nb_ops = tcg_opc_ptr - s->gen_opc_buf;
- gen_args = args;
- for (op_index = 0; op_index < nb_ops; op_index++) {
- TCGOpcode op = s->gen_opc_buf[op_index];
- const TCGOpDef *def = &tcg_op_defs[op];
+ for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
tcg_target_ulong mask, partmask, affected;
- int nb_oargs, nb_iargs, nb_args, i;
+ int nb_oargs, nb_iargs, i;
TCGArg tmp;
- if (op == INDEX_op_call) {
- *gen_args++ = tmp = *args++;
- nb_oargs = tmp >> 16;
- nb_iargs = tmp & 0xffff;
- nb_args = nb_oargs + nb_iargs + def->nb_cargs;
+ TCGOp * const op = &s->gen_op_buf[oi];
+ TCGArg * const args = &s->gen_opparam_buf[op->args];
+ TCGOpcode opc = op->opc;
+ const TCGOpDef *def = &tcg_op_defs[opc];
+
+ oi_next = op->next;
+ if (opc == INDEX_op_call) {
+ nb_oargs = op->callo;
+ nb_iargs = op->calli;
} else {
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
- nb_args = def->nb_args;
}
/* Do copy propagation */
@@ -576,7 +604,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
/* For commutative operations make constant second argument */
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(add):
CASE_OP_32_64(mul):
CASE_OP_32_64(and):
@@ -634,7 +662,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
/* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
and "sub r, 0, a => neg r, a" case. */
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(shl):
CASE_OP_32_64(shr):
CASE_OP_32_64(sar):
@@ -642,9 +670,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
CASE_OP_32_64(rotr):
if (temps[args[1]].state == TCG_TEMP_CONST
&& temps[args[1]].val == 0) {
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
- args += 3;
- gen_args += 2;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], 0);
continue;
}
break;
@@ -657,7 +683,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
/* Proceed with possible constant folding. */
break;
}
- if (op == INDEX_op_sub_i32) {
+ if (opc == INDEX_op_sub_i32) {
neg_op = INDEX_op_neg_i32;
have_neg = TCG_TARGET_HAS_neg_i32;
} else {
@@ -669,12 +695,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
if (temps[args[1]].state == TCG_TEMP_CONST
&& temps[args[1]].val == 0) {
- s->gen_opc_buf[op_index] = neg_op;
+ op->opc = neg_op;
reset_temp(args[0]);
- gen_args[0] = args[0];
- gen_args[1] = args[2];
- args += 3;
- gen_args += 2;
+ args[1] = args[2];
continue;
}
}
@@ -728,12 +751,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
if (!have_not) {
break;
}
- s->gen_opc_buf[op_index] = not_op;
+ op->opc = not_op;
reset_temp(args[0]);
- gen_args[0] = args[0];
- gen_args[1] = args[i];
- args += 3;
- gen_args += 2;
+ args[1] = args[i];
continue;
}
default:
@@ -741,7 +761,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
/* Simplify expression for "op r, a, const => mov r, a" cases */
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(add):
CASE_OP_32_64(sub):
CASE_OP_32_64(shl):
@@ -769,12 +789,10 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
break;
do_mov3:
if (temps_are_copies(args[0], args[1])) {
- s->gen_opc_buf[op_index] = INDEX_op_nop;
+ tcg_op_remove(s, op);
} else {
- tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
- gen_args += 2;
+ tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]);
}
- args += 3;
continue;
default:
break;
@@ -784,7 +802,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
output argument is supported. */
mask = -1;
affected = -1;
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(ext8s):
if ((temps[args[1]].mask & 0x80) != 0) {
break;
@@ -923,38 +941,31 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
if (partmask == 0) {
assert(nb_oargs == 1);
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
- args += nb_args;
- gen_args += 2;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], 0);
continue;
}
if (affected == 0) {
assert(nb_oargs == 1);
if (temps_are_copies(args[0], args[1])) {
- s->gen_opc_buf[op_index] = INDEX_op_nop;
+ tcg_op_remove(s, op);
} else if (temps[args[1]].state != TCG_TEMP_CONST) {
- tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
- gen_args += 2;
+ tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]);
} else {
- tcg_opt_gen_movi(s, op_index, gen_args, op,
+ tcg_opt_gen_movi(s, op, args, opc,
args[0], temps[args[1]].val);
- gen_args += 2;
}
- args += nb_args;
continue;
}
/* Simplify expression for "op r, a, 0 => movi r, 0" cases */
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(and):
CASE_OP_32_64(mul):
CASE_OP_32_64(muluh):
CASE_OP_32_64(mulsh):
if ((temps[args[2]].state == TCG_TEMP_CONST
&& temps[args[2]].val == 0)) {
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
- args += 3;
- gen_args += 2;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], 0);
continue;
}
break;
@@ -963,18 +974,15 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
/* Simplify expression for "op r, a, a => mov r, a" cases */
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(or):
CASE_OP_32_64(and):
if (temps_are_copies(args[1], args[2])) {
if (temps_are_copies(args[0], args[1])) {
- s->gen_opc_buf[op_index] = INDEX_op_nop;
+ tcg_op_remove(s, op);
} else {
- tcg_opt_gen_mov(s, op_index, gen_args, op,
- args[0], args[1]);
- gen_args += 2;
+ tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]);
}
- args += 3;
continue;
}
break;
@@ -983,14 +991,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
/* Simplify expression for "op r, a, a => movi r, 0" cases */
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(andc):
CASE_OP_32_64(sub):
CASE_OP_32_64(xor):
if (temps_are_copies(args[1], args[2])) {
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
- gen_args += 2;
- args += 3;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], 0);
continue;
}
break;
@@ -1001,17 +1007,14 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
/* Propagate constants through copy operations and do constant
folding. Constants will be substituted to arguments by register
allocator where needed and possible. Also detect copies. */
- switch (op) {
+ switch (opc) {
CASE_OP_32_64(mov):
if (temps_are_copies(args[0], args[1])) {
- args += 2;
- s->gen_opc_buf[op_index] = INDEX_op_nop;
+ tcg_op_remove(s, op);
break;
}
if (temps[args[1]].state != TCG_TEMP_CONST) {
- tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
- gen_args += 2;
- args += 2;
+ tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]);
break;
}
/* Source argument is constant. Rewrite the operation and
@@ -1019,9 +1022,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
args[1] = temps[args[1]].val;
/* fallthrough */
CASE_OP_32_64(movi):
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], args[1]);
- gen_args += 2;
- args += 2;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], args[1]);
break;
CASE_OP_32_64(not):
@@ -1033,20 +1034,16 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
case INDEX_op_ext32s_i64:
case INDEX_op_ext32u_i64:
if (temps[args[1]].state == TCG_TEMP_CONST) {
- tmp = do_constant_folding(op, temps[args[1]].val, 0);
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
- gen_args += 2;
- args += 2;
+ tmp = do_constant_folding(opc, temps[args[1]].val, 0);
+ tcg_opt_gen_movi(s, op, args, opc, args[0], tmp);
break;
}
goto do_default;
case INDEX_op_trunc_shr_i32:
if (temps[args[1]].state == TCG_TEMP_CONST) {
- tmp = do_constant_folding(op, temps[args[1]].val, args[2]);
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
- gen_args += 2;
- args += 3;
+ tmp = do_constant_folding(opc, temps[args[1]].val, args[2]);
+ tcg_opt_gen_movi(s, op, args, opc, args[0], tmp);
break;
}
goto do_default;
@@ -1075,11 +1072,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
CASE_OP_32_64(remu):
if (temps[args[1]].state == TCG_TEMP_CONST
&& temps[args[2]].state == TCG_TEMP_CONST) {
- tmp = do_constant_folding(op, temps[args[1]].val,
+ tmp = do_constant_folding(opc, temps[args[1]].val,
temps[args[2]].val);
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
- gen_args += 2;
- args += 3;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], tmp);
break;
}
goto do_default;
@@ -1089,54 +1084,44 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
&& temps[args[2]].state == TCG_TEMP_CONST) {
tmp = deposit64(temps[args[1]].val, args[3], args[4],
temps[args[2]].val);
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
- gen_args += 2;
- args += 5;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], tmp);
break;
}
goto do_default;
CASE_OP_32_64(setcond):
- tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
+ tmp = do_constant_folding_cond(opc, args[1], args[2], args[3]);
if (tmp != 2) {
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
- gen_args += 2;
- args += 4;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], tmp);
break;
}
goto do_default;
CASE_OP_32_64(brcond):
- tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
+ tmp = do_constant_folding_cond(opc, args[0], args[1], args[2]);
if (tmp != 2) {
if (tmp) {
reset_all_temps(nb_temps);
- s->gen_opc_buf[op_index] = INDEX_op_br;
- gen_args[0] = args[3];
- gen_args += 1;
+ op->opc = INDEX_op_br;
+ args[0] = args[3];
} else {
- s->gen_opc_buf[op_index] = INDEX_op_nop;
+ tcg_op_remove(s, op);
}
- args += 4;
break;
}
goto do_default;
CASE_OP_32_64(movcond):
- tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
+ tmp = do_constant_folding_cond(opc, args[1], args[2], args[5]);
if (tmp != 2) {
if (temps_are_copies(args[0], args[4-tmp])) {
- s->gen_opc_buf[op_index] = INDEX_op_nop;
+ tcg_op_remove(s, op);
} else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
- tcg_opt_gen_movi(s, op_index, gen_args, op,
+ tcg_opt_gen_movi(s, op, args, opc,
args[0], temps[args[4-tmp]].val);
- gen_args += 2;
} else {
- tcg_opt_gen_mov(s, op_index, gen_args, op,
- args[0], args[4-tmp]);
- gen_args += 2;
+ tcg_opt_gen_mov(s, op, args, opc, args[0], args[4-tmp]);
}
- args += 6;
break;
}
goto do_default;
@@ -1154,24 +1139,22 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
uint64_t a = ((uint64_t)ah << 32) | al;
uint64_t b = ((uint64_t)bh << 32) | bl;
TCGArg rl, rh;
+ TCGOp *op2 = insert_op_before(s, op, INDEX_op_movi_i32, 2);
+ TCGArg *args2 = &s->gen_opparam_buf[op2->args];
- if (op == INDEX_op_add2_i32) {
+ if (opc == INDEX_op_add2_i32) {
a += b;
} else {
a -= b;
}
- /* We emit the extra nop when we emit the add2/sub2. */
- assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
-
rl = args[0];
rh = args[1];
- tcg_opt_gen_movi(s, op_index, &gen_args[0],
- op, rl, (uint32_t)a);
- tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
- op, rh, (uint32_t)(a >> 32));
- gen_args += 4;
- args += 6;
+ tcg_opt_gen_movi(s, op, args, opc, rl, (uint32_t)a);
+ tcg_opt_gen_movi(s, op2, args2, opc, rh, (uint32_t)(a >> 32));
+
+ /* We've done all we need to do with the movi. Skip it. */
+ oi_next = op2->next;
break;
}
goto do_default;
@@ -1183,18 +1166,16 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
uint32_t b = temps[args[3]].val;
uint64_t r = (uint64_t)a * b;
TCGArg rl, rh;
-
- /* We emit the extra nop when we emit the mulu2. */
- assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
+ TCGOp *op2 = insert_op_before(s, op, INDEX_op_movi_i32, 2);
+ TCGArg *args2 = &s->gen_opparam_buf[op2->args];
rl = args[0];
rh = args[1];
- tcg_opt_gen_movi(s, op_index, &gen_args[0],
- op, rl, (uint32_t)r);
- tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
- op, rh, (uint32_t)(r >> 32));
- gen_args += 4;
- args += 4;
+ tcg_opt_gen_movi(s, op, args, opc, rl, (uint32_t)r);
+ tcg_opt_gen_movi(s, op2, args2, opc, rh, (uint32_t)(r >> 32));
+
+ /* We've done all we need to do with the movi. Skip it. */
+ oi_next = op2->next;
break;
}
goto do_default;
@@ -1205,12 +1186,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
if (tmp) {
do_brcond_true:
reset_all_temps(nb_temps);
- s->gen_opc_buf[op_index] = INDEX_op_br;
- gen_args[0] = args[5];
- gen_args += 1;
+ op->opc = INDEX_op_br;
+ args[0] = args[5];
} else {
do_brcond_false:
- s->gen_opc_buf[op_index] = INDEX_op_nop;
+ tcg_op_remove(s, op);
}
} else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
&& temps[args[2]].state == TCG_TEMP_CONST
@@ -1221,12 +1201,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
vs the high word of the input. */
do_brcond_high:
reset_all_temps(nb_temps);
- s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
- gen_args[0] = args[1];
- gen_args[1] = args[3];
- gen_args[2] = args[4];
- gen_args[3] = args[5];
- gen_args += 4;
+ op->opc = INDEX_op_brcond_i32;
+ args[0] = args[1];
+ args[1] = args[3];
+ args[2] = args[4];
+ args[3] = args[5];
} else if (args[4] == TCG_COND_EQ) {
/* Simplify EQ comparisons where one of the pairs
can be simplified. */
@@ -1246,12 +1225,10 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
do_brcond_low:
reset_all_temps(nb_temps);
- s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
- gen_args[0] = args[0];
- gen_args[1] = args[2];
- gen_args[2] = args[4];
- gen_args[3] = args[5];
- gen_args += 4;
+ op->opc = INDEX_op_brcond_i32;
+ args[1] = args[2];
+ args[2] = args[4];
+ args[3] = args[5];
} else if (args[4] == TCG_COND_NE) {
/* Simplify NE comparisons where one of the pairs
can be simplified. */
@@ -1273,15 +1250,13 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
} else {
goto do_default;
}
- args += 6;
break;
case INDEX_op_setcond2_i32:
tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
if (tmp != 2) {
do_setcond_const:
- tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
- gen_args += 2;
+ tcg_opt_gen_movi(s, op, args, opc, args[0], tmp);
} else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
&& temps[args[3]].state == TCG_TEMP_CONST
&& temps[args[4]].state == TCG_TEMP_CONST
@@ -1290,14 +1265,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
/* Simplify LT/GE comparisons vs zero to a single compare
vs the high word of the input. */
do_setcond_high:
- s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
reset_temp(args[0]);
temps[args[0]].mask = 1;
- gen_args[0] = args[0];
- gen_args[1] = args[2];
- gen_args[2] = args[4];
- gen_args[3] = args[5];
- gen_args += 4;
+ op->opc = INDEX_op_setcond_i32;
+ args[1] = args[2];
+ args[2] = args[4];
+ args[3] = args[5];
} else if (args[5] == TCG_COND_EQ) {
/* Simplify EQ comparisons where one of the pairs
can be simplified. */
@@ -1318,12 +1291,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
do_setcond_low:
reset_temp(args[0]);
temps[args[0]].mask = 1;
- s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
- gen_args[0] = args[0];
- gen_args[1] = args[1];
- gen_args[2] = args[3];
- gen_args[3] = args[5];
- gen_args += 4;
+ op->opc = INDEX_op_setcond_i32;
+ args[2] = args[3];
+ args[3] = args[5];
} else if (args[5] == TCG_COND_NE) {
/* Simplify NE comparisons where one of the pairs
can be simplified. */
@@ -1345,7 +1315,6 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
} else {
goto do_default;
}
- args += 6;
break;
case INDEX_op_call:
@@ -1377,22 +1346,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
}
}
- for (i = 0; i < nb_args; i++) {
- gen_args[i] = args[i];
- }
- args += nb_args;
- gen_args += nb_args;
break;
}
}
-
- return gen_args;
}
-TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
- TCGArg *args, TCGOpDef *tcg_op_defs)
+void tcg_optimize(TCGContext *s)
{
- TCGArg *res;
- res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
- return res;
+ tcg_constant_folding(s);
}
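The rewritten optimizer walks the ops as a doubly linked list whose prev/next links are indices into gen_op_buf, rather than compacting opcodes and arguments into separate output buffers. The key idiom is capturing the successor index before the current op may be removed or rewritten; schematically (names as in the patch above, body elided):

int oi, oi_next;

for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
    TCGOp *op = &s->gen_op_buf[oi];
    TCGArg *args = &s->gen_opparam_buf[op->args];

    oi_next = op->next;   /* read the link first: op may be removed below */

    /* ... fold constants, rewrite op->opc/args in place, call
     * tcg_op_remove(s, op) or insert_op_before(s, op, ...) as needed ... */
}

Because ops are edited in place, the per-case args/gen_args pointer bookkeeping of the old implementation disappears, which is where most of the deletions above come from.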
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
new file mode 100644
index 0000000000..afa351dc70
--- /dev/null
+++ b/tcg/tcg-op.c
@@ -0,0 +1,1934 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "tcg.h"
+#include "tcg-op.h"
+
+/* Reduce the number of ifdefs below. This assumes that all uses of
+ TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
+ the compiler can eliminate. */
+#if TCG_TARGET_REG_BITS == 64
+extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
+extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
+#define TCGV_LOW TCGV_LOW_link_error
+#define TCGV_HIGH TCGV_HIGH_link_error
+#endif
+
+/* Note that this is optimized for sequential allocation during translate.
+ Up to and including filling in the forward link immediately. We'll do
+ proper termination of the end of the list after we finish translation. */
+
+static void tcg_emit_op(TCGContext *ctx, TCGOpcode opc, int args)
+{
+ int oi = ctx->gen_next_op_idx;
+ int ni = oi + 1;
+ int pi = oi - 1;
+
+ tcg_debug_assert(oi < OPC_BUF_SIZE);
+ ctx->gen_last_op_idx = oi;
+ ctx->gen_next_op_idx = ni;
+
+ ctx->gen_op_buf[oi] = (TCGOp){
+ .opc = opc,
+ .args = args,
+ .prev = pi,
+ .next = ni
+ };
+}
+
+void tcg_gen_op1(TCGContext *ctx, TCGOpcode opc, TCGArg a1)
+{
+ int pi = ctx->gen_next_parm_idx;
+
+ tcg_debug_assert(pi + 1 <= OPPARAM_BUF_SIZE);
+ ctx->gen_next_parm_idx = pi + 1;
+ ctx->gen_opparam_buf[pi] = a1;
+
+ tcg_emit_op(ctx, opc, pi);
+}
+
+void tcg_gen_op2(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2)
+{
+ int pi = ctx->gen_next_parm_idx;
+
+ tcg_debug_assert(pi + 2 <= OPPARAM_BUF_SIZE);
+ ctx->gen_next_parm_idx = pi + 2;
+ ctx->gen_opparam_buf[pi + 0] = a1;
+ ctx->gen_opparam_buf[pi + 1] = a2;
+
+ tcg_emit_op(ctx, opc, pi);
+}
+
+void tcg_gen_op3(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
+ TCGArg a2, TCGArg a3)
+{
+ int pi = ctx->gen_next_parm_idx;
+
+ tcg_debug_assert(pi + 3 <= OPPARAM_BUF_SIZE);
+ ctx->gen_next_parm_idx = pi + 3;
+ ctx->gen_opparam_buf[pi + 0] = a1;
+ ctx->gen_opparam_buf[pi + 1] = a2;
+ ctx->gen_opparam_buf[pi + 2] = a3;
+
+ tcg_emit_op(ctx, opc, pi);
+}
+
+void tcg_gen_op4(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
+ TCGArg a2, TCGArg a3, TCGArg a4)
+{
+ int pi = ctx->gen_next_parm_idx;
+
+ tcg_debug_assert(pi + 4 <= OPPARAM_BUF_SIZE);
+ ctx->gen_next_parm_idx = pi + 4;
+ ctx->gen_opparam_buf[pi + 0] = a1;
+ ctx->gen_opparam_buf[pi + 1] = a2;
+ ctx->gen_opparam_buf[pi + 2] = a3;
+ ctx->gen_opparam_buf[pi + 3] = a4;
+
+ tcg_emit_op(ctx, opc, pi);
+}
+
+void tcg_gen_op5(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
+ TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5)
+{
+ int pi = ctx->gen_next_parm_idx;
+
+ tcg_debug_assert(pi + 5 <= OPPARAM_BUF_SIZE);
+ ctx->gen_next_parm_idx = pi + 5;
+ ctx->gen_opparam_buf[pi + 0] = a1;
+ ctx->gen_opparam_buf[pi + 1] = a2;
+ ctx->gen_opparam_buf[pi + 2] = a3;
+ ctx->gen_opparam_buf[pi + 3] = a4;
+ ctx->gen_opparam_buf[pi + 4] = a5;
+
+ tcg_emit_op(ctx, opc, pi);
+}
+
+void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2,
+ TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6)
+{
+ int pi = ctx->gen_next_parm_idx;
+
+ tcg_debug_assert(pi + 6 <= OPPARAM_BUF_SIZE);
+ ctx->gen_next_parm_idx = pi + 6;
+ ctx->gen_opparam_buf[pi + 0] = a1;
+ ctx->gen_opparam_buf[pi + 1] = a2;
+ ctx->gen_opparam_buf[pi + 2] = a3;
+ ctx->gen_opparam_buf[pi + 3] = a4;
+ ctx->gen_opparam_buf[pi + 4] = a5;
+ ctx->gen_opparam_buf[pi + 5] = a6;
+
+ tcg_emit_op(ctx, opc, pi);
+}
+
+/* 32 bit ops */
+
+void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_add_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
+{
+ if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) {
+ /* Don't recurse with tcg_gen_neg_i32. */
+ tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg1);
+ tcg_gen_sub_i32(ret, t0, arg2);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_sub_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
+{
+ TCGv_i32 t0;
+ /* Some cases can be optimized here. */
+ switch (arg2) {
+ case 0:
+ tcg_gen_movi_i32(ret, 0);
+ return;
+ case 0xffffffffu:
+ tcg_gen_mov_i32(ret, arg1);
+ return;
+ case 0xffu:
+ /* Don't recurse with tcg_gen_ext8u_i32. */
+ if (TCG_TARGET_HAS_ext8u_i32) {
+ tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
+ return;
+ }
+ break;
+ case 0xffffu:
+ if (TCG_TARGET_HAS_ext16u_i32) {
+ tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
+ return;
+ }
+ break;
+ }
+ t0 = tcg_const_i32(arg2);
+ tcg_gen_and_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+}
+
+void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+ /* Some cases can be optimized here. */
+ if (arg2 == -1) {
+ tcg_gen_movi_i32(ret, -1);
+ } else if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_or_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+ /* Some cases can be optimized here. */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
+ /* Don't recurse with tcg_gen_not_i32. */
+ tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_xor_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 32);
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_shl_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 32);
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_shr_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 32);
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_sar_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, int label)
+{
+ if (cond == TCG_COND_ALWAYS) {
+ tcg_gen_br(label);
+ } else if (cond != TCG_COND_NEVER) {
+ tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label);
+ }
+}
+
+void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, int label)
+{
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_brcond_i32(cond, arg1, t0, label);
+ tcg_temp_free_i32(t0);
+}
+
+void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (cond == TCG_COND_ALWAYS) {
+ tcg_gen_movi_i32(ret, 1);
+ } else if (cond == TCG_COND_NEVER) {
+ tcg_gen_movi_i32(ret, 0);
+ } else {
+ tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
+ }
+}
+
+void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2)
+{
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_setcond_i32(cond, ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+}
+
+void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_mul_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+}
+
+void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_div_i32) {
+ tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t0, arg1, 31);
+ tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
+ tcg_temp_free_i32(t0);
+ } else {
+ gen_helper_div_i32(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_rem_i32) {
+ tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div_i32) {
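+ /* rem = arg1 - (arg1 / arg2) * arg2, using truncating signed division. */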
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
+ tcg_gen_mul_i32(t0, t0, arg2);
+ tcg_gen_sub_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t0, arg1, 31);
+ tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
+ tcg_temp_free_i32(t0);
+ } else {
+ gen_helper_rem_i32(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_div_i32) {
+ tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
+ tcg_temp_free_i32(t0);
+ } else {
+ gen_helper_divu_i32(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_rem_i32) {
+ tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
+ tcg_gen_mul_i32(t0, t0, arg2);
+ tcg_gen_sub_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
+ tcg_temp_free_i32(t0);
+ } else {
+ gen_helper_remu_i32(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_not_i32(t0, arg2);
+ tcg_gen_and_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_eqv_i32) {
+ tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
+ } else {
+ tcg_gen_xor_i32(ret, arg1, arg2);
+ tcg_gen_not_i32(ret, ret);
+ }
+}
+
+void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_nand_i32) {
+ tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
+ } else {
+ tcg_gen_and_i32(ret, arg1, arg2);
+ tcg_gen_not_i32(ret, ret);
+ }
+}
+
+void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_nor_i32) {
+ tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
+ } else {
+ tcg_gen_or_i32(ret, arg1, arg2);
+ tcg_gen_not_i32(ret, ret);
+ }
+}
+
+void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_orc_i32) {
+ tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_not_i32(t0, arg2);
+ tcg_gen_or_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_rot_i32) {
+ tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
+ } else {
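+ /* Fallback: rotl(x, c) = (x << c) | (x >> (32 - c)). */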
+ TCGv_i32 t0, t1;
+
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_shl_i32(t0, arg1, arg2);
+ tcg_gen_subfi_i32(t1, 32, arg2);
+ tcg_gen_shr_i32(t1, arg1, t1);
+ tcg_gen_or_i32(ret, t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+}
+
+void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 32);
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else if (TCG_TARGET_HAS_rot_i32) {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_rotl_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ } else {
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_shli_i32(t0, arg1, arg2);
+ tcg_gen_shri_i32(t1, arg1, 32 - arg2);
+ tcg_gen_or_i32(ret, t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+}
+
+void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_rot_i32) {
+ tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
+ } else {
+ TCGv_i32 t0, t1;
+
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_shr_i32(t0, arg1, arg2);
+ tcg_gen_subfi_i32(t1, 32, arg2);
+ tcg_gen_shl_i32(t1, arg1, t1);
+ tcg_gen_or_i32(ret, t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+}
+
+void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 32);
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
+ }
+}
+
+void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
+ unsigned int ofs, unsigned int len)
+{
+ uint32_t mask;
+ TCGv_i32 t1;
+
+ tcg_debug_assert(ofs < 32);
+ tcg_debug_assert(len <= 32);
+ tcg_debug_assert(ofs + len <= 32);
+
+ if (ofs == 0 && len == 32) {
+ tcg_gen_mov_i32(ret, arg2);
+ return;
+ }
+ if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
+ tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
+ return;
+ }
+
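+ /* Generic fallback: mask the low LEN bits of arg2, shift them to OFS,
+ clear that field in arg1 and OR the two together. E.g. for ofs = 8,
+ len = 8 this computes (arg1 & ~0xff00) | ((arg2 & 0xff) << 8). */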
+ mask = (1u << len) - 1;
+ t1 = tcg_temp_new_i32();
+
+ if (ofs + len < 32) {
+ tcg_gen_andi_i32(t1, arg2, mask);
+ tcg_gen_shli_i32(t1, t1, ofs);
+ } else {
+ tcg_gen_shli_i32(t1, arg2, ofs);
+ }
+ tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
+ tcg_gen_or_i32(ret, ret, t1);
+
+ tcg_temp_free_i32(t1);
+}
+
+void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
+ TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
+{
+ if (TCG_TARGET_HAS_movcond_i32) {
+ tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
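+ /* Fallback via a mask: setcond yields 0 or 1, negation turns that into
+ all-zeros or all-ones, so ret = (v1 & mask) | (v2 & ~mask). */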
+ tcg_gen_setcond_i32(cond, t0, c1, c2);
+ tcg_gen_neg_i32(t0, t0);
+ tcg_gen_and_i32(t1, v1, t0);
+ tcg_gen_andc_i32(ret, v2, t0);
+ tcg_gen_or_i32(ret, ret, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+}
+
+void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
+{
+ if (TCG_TARGET_HAS_add2_i32) {
+ tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(t0, al, ah);
+ tcg_gen_concat_i32_i64(t1, bl, bh);
+ tcg_gen_add_i64(t0, t0, t1);
+ tcg_gen_extr_i64_i32(rl, rh, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
+{
+ if (TCG_TARGET_HAS_sub2_i32) {
+ tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(t0, al, ah);
+ tcg_gen_concat_i32_i64(t1, bl, bh);
+ tcg_gen_sub_i64(t0, t0, t1);
+ tcg_gen_extr_i64_i32(rl, rh, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_mulu2_i32) {
+ tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
+ } else if (TCG_TARGET_HAS_muluh_i32) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
+ tcg_gen_mov_i32(rl, t);
+ tcg_temp_free_i32(t);
+ } else {
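+ /* Fallback: zero-extend both operands to 64 bits, multiply, and split
+ the 64-bit product into its low and high 32-bit halves. */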
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t0, arg1);
+ tcg_gen_extu_i32_i64(t1, arg2);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_extr_i64_i32(rl, rh, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_muls2_i32) {
+ tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
+ } else if (TCG_TARGET_HAS_mulsh_i32) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
+ tcg_gen_mov_i32(rl, t);
+ tcg_temp_free_i32(t);
+ } else if (TCG_TARGET_REG_BITS == 32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
+ /* Adjust for negative inputs. */
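+ /* For the signed high part: high_s(a * b) = high_u(a * b)
+ - (a < 0 ? b : 0) - (b < 0 ? a : 0); the sign masks t2 and t3
+ compute the two correction terms without branches. */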
+ tcg_gen_sari_i32(t2, arg1, 31);
+ tcg_gen_sari_i32(t3, arg2, 31);
+ tcg_gen_and_i32(t2, t2, arg2);
+ tcg_gen_and_i32(t3, t3, arg1);
+ tcg_gen_sub_i32(rh, t1, t2);
+ tcg_gen_sub_i32(rh, rh, t3);
+ tcg_gen_mov_i32(rl, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(t0, arg1);
+ tcg_gen_ext_i32_i64(t1, arg2);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_extr_i64_i32(rl, rh, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_ext8s_i32) {
+ tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
+ } else {
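+ /* No ext8s op: shift the byte to the top, then arithmetic-shift back. */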
+ tcg_gen_shli_i32(ret, arg, 24);
+ tcg_gen_sari_i32(ret, ret, 24);
+ }
+}
+
+void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_ext16s_i32) {
+ tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
+ } else {
+ tcg_gen_shli_i32(ret, arg, 16);
+ tcg_gen_sari_i32(ret, ret, 16);
+ }
+}
+
+void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_ext8u_i32) {
+ tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
+ } else {
+ tcg_gen_andi_i32(ret, arg, 0xffu);
+ }
+}
+
+void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_ext16u_i32) {
+ tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
+ } else {
+ tcg_gen_andi_i32(ret, arg, 0xffffu);
+ }
+}
+
+/* Note: we assume the two high bytes are set to zero */
+void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_bswap16_i32) {
+ tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_ext8u_i32(t0, arg);
+ tcg_gen_shli_i32(t0, t0, 8);
+ tcg_gen_shri_i32(ret, arg, 8);
+ tcg_gen_or_i32(ret, ret, t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_bswap32_i32) {
+ tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
+ } else {
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+
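+ /* Reverse the four bytes: for arg = 0xaabbccdd the four pieces below
+ are 0xdd000000, 0x00cc0000, 0x0000bb00 and 0x000000aa, which OR
+ together to 0xddccbbaa. */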
+ tcg_gen_shli_i32(t0, arg, 24);
+
+ tcg_gen_andi_i32(t1, arg, 0x0000ff00);
+ tcg_gen_shli_i32(t1, t1, 8);
+ tcg_gen_or_i32(t0, t0, t1);
+
+ tcg_gen_shri_i32(t1, arg, 8);
+ tcg_gen_andi_i32(t1, t1, 0x0000ff00);
+ tcg_gen_or_i32(t0, t0, t1);
+
+ tcg_gen_shri_i32(t1, arg, 24);
+ tcg_gen_or_i32(ret, t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+}
+
+/* 64-bit ops */
+
+#if TCG_TARGET_REG_BITS == 32
+/* These are all inline for TCG_TARGET_REG_BITS == 64. */
+
+void tcg_gen_discard_i64(TCGv_i64 arg)
+{
+ tcg_gen_discard_i32(TCGV_LOW(arg));
+ tcg_gen_discard_i32(TCGV_HIGH(arg));
+}
+
+void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
+}
+
+void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
+{
+ tcg_gen_movi_i32(TCGV_LOW(ret), arg);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
+}
+
+void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+ tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+ tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+}
+
+void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+ tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+ tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+}
+
+void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+ tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+ tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+}
+
+void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+ /* Since arg2 and ret have different types,
+ they cannot be the same temporary. */
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
+ tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
+#else
+ tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
+ tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
+#endif
+}
+
+void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
+{
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
+ tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
+#else
+ tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
+ tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
+#endif
+}
+
+void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+}
+
+void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+}
+
+void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+}
+
+void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ gen_helper_shl_i64(ret, arg1, arg2);
+}
+
+void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ gen_helper_shr_i64(ret, arg1, arg2);
+}
+
+void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ gen_helper_sar_i64(ret, arg1, arg2);
+}
+
+void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ TCGv_i64 t0;
+ TCGv_i32 t1;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i32();
+
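+ /* Schoolbook multiply on 32-bit halves: the low 64 bits of the product
+ are mulu2(al, bl) + ((al * bh + ah * bl) << 32). */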
+ tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
+ TCGV_LOW(arg1), TCGV_LOW(arg2));
+
+ tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
+ tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
+ tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
+ tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
+
+ tcg_gen_mov_i64(ret, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i32(t1);
+}
+#endif /* TCG_TARGET_REG_BITS == 32 */
+
+void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_add_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
+{
+ if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
+ /* Don't recurse with tcg_gen_neg_i64. */
+ tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg1);
+ tcg_gen_sub_i64(ret, t0, arg2);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_sub_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
+{
+ TCGv_i64 t0;
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
+ tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
+ return;
+ }
+
+ /* Some cases can be optimized here. */
+ switch (arg2) {
+ case 0:
+ tcg_gen_movi_i64(ret, 0);
+ return;
+ case 0xffffffffffffffffull:
+ tcg_gen_mov_i64(ret, arg1);
+ return;
+ case 0xffull:
+ /* Don't recurse with tcg_gen_ext8u_i64. */
+ if (TCG_TARGET_HAS_ext8u_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
+ return;
+ }
+ break;
+ case 0xffffu:
+ if (TCG_TARGET_HAS_ext16u_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
+ return;
+ }
+ break;
+ case 0xffffffffull:
+ if (TCG_TARGET_HAS_ext32u_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
+ return;
+ }
+ break;
+ }
+ t0 = tcg_const_i64(arg2);
+ tcg_gen_and_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+}
+
+void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
+ tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
+ return;
+ }
+ /* Some cases can be optimized here. */
+ if (arg2 == -1) {
+ tcg_gen_movi_i64(ret, -1);
+ } else if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_or_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
+ tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
+ return;
+ }
+ /* Some cases can be optimized here. */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
+ /* Don't recurse with tcg_gen_not_i64. */
+ tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_xor_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
+ unsigned c, bool right, bool arith)
+{
+ tcg_debug_assert(c < 64);
+ if (c == 0) {
+ tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
+ tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
+ } else if (c >= 32) {
+ c -= 32;
+ if (right) {
+ if (arith) {
+ tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
+ } else {
+ tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ }
+ } else {
+ tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
+ tcg_gen_movi_i32(TCGV_LOW(ret), 0);
+ }
+ } else {
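+ /* 0 < c < 32: each half of the result combines bits shifted within
+ its own half with bits that cross over from the other half. */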
+ TCGv_i32 t0, t1;
+
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ if (right) {
+ tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
+ if (arith) {
+ tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
+ } else {
+ tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
+ }
+ tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
+ tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
+ tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
+ } else {
+ tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
+ /* Note: ret can be the same as arg1, so we use t1 */
+ tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
+ tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
+ tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
+ tcg_gen_mov_i32(TCGV_LOW(ret), t1);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+}
+
+void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 64);
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
+ } else if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_shl_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 64);
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
+ } else if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_shr_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 64);
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
+ } else if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_sar_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, int label)
+{
+ if (cond == TCG_COND_ALWAYS) {
+ tcg_gen_br(label);
+ } else if (cond != TCG_COND_NEVER) {
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
+ TCGV_HIGH(arg1), TCGV_LOW(arg2),
+ TCGV_HIGH(arg2), cond, label);
+ } else {
+ tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label);
+ }
+ }
+}
+
+void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, int label)
+{
+ if (cond == TCG_COND_ALWAYS) {
+ tcg_gen_br(label);
+ } else if (cond != TCG_COND_NEVER) {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_brcond_i64(cond, arg1, t0, label);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (cond == TCG_COND_ALWAYS) {
+ tcg_gen_movi_i64(ret, 1);
+ } else if (cond == TCG_COND_NEVER) {
+ tcg_gen_movi_i64(ret, 0);
+ } else {
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
+ TCGV_LOW(arg1), TCGV_HIGH(arg1),
+ TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else {
+ tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
+ }
+ }
+}
+
+void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2)
+{
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_setcond_i64(cond, ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+}
+
+void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_mul_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+}
+
+void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_div_i64) {
+ tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div2_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t0, arg1, 63);
+ tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
+ tcg_temp_free_i64(t0);
+ } else {
+ gen_helper_div_i64(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_rem_i64) {
+ tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
+ tcg_gen_mul_i64(t0, t0, arg2);
+ tcg_gen_sub_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ } else if (TCG_TARGET_HAS_div2_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t0, arg1, 63);
+ tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
+ tcg_temp_free_i64(t0);
+ } else {
+ gen_helper_rem_i64(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_div_i64) {
+ tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div2_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_movi_i64(t0, 0);
+ tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
+ tcg_temp_free_i64(t0);
+ } else {
+ gen_helper_divu_i64(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_rem_i64) {
+ tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
+ tcg_gen_mul_i64(t0, t0, arg2);
+ tcg_gen_sub_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ } else if (TCG_TARGET_HAS_div2_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_movi_i64(t0, 0);
+ tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
+ tcg_temp_free_i64(t0);
+ } else {
+ gen_helper_remu_i64(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+ } else if (TCG_TARGET_HAS_ext8s_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
+ } else {
+ tcg_gen_shli_i64(ret, arg, 56);
+ tcg_gen_sari_i64(ret, ret, 56);
+ }
+}
+
+void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+ } else if (TCG_TARGET_HAS_ext16s_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
+ } else {
+ tcg_gen_shli_i64(ret, arg, 48);
+ tcg_gen_sari_i64(ret, ret, 48);
+ }
+}
+
+void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+ } else if (TCG_TARGET_HAS_ext32s_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
+ } else {
+ tcg_gen_shli_i64(ret, arg, 32);
+ tcg_gen_sari_i64(ret, ret, 32);
+ }
+}
+
+void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else if (TCG_TARGET_HAS_ext8u_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
+ } else {
+ tcg_gen_andi_i64(ret, arg, 0xffu);
+ }
+}
+
+void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else if (TCG_TARGET_HAS_ext16u_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
+ } else {
+ tcg_gen_andi_i64(ret, arg, 0xffffu);
+ }
+}
+
+void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else if (TCG_TARGET_HAS_ext32u_i64) {
+ tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
+ } else {
+ tcg_gen_andi_i64(ret, arg, 0xffffffffu);
+ }
+}
+
+/* Note: we assume the six high bytes are set to zero */
+void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else if (TCG_TARGET_HAS_bswap16_i64) {
+ tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_ext8u_i64(t0, arg);
+ tcg_gen_shli_i64(t0, t0, 8);
+ tcg_gen_shri_i64(ret, arg, 8);
+ tcg_gen_or_i64(ret, ret, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+/* Note: we assume the four high bytes are set to zero */
+void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else if (TCG_TARGET_HAS_bswap32_i64) {
+ tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
+ } else {
+ TCGv_i64 t0, t1;
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(t0, arg, 24);
+ tcg_gen_ext32u_i64(t0, t0);
+
+ tcg_gen_andi_i64(t1, arg, 0x0000ff00);
+ tcg_gen_shli_i64(t1, t1, 8);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 8);
+ tcg_gen_andi_i64(t1, t1, 0x0000ff00);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 24);
+ tcg_gen_or_i64(ret, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+
+ tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
+ tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
+ tcg_gen_mov_i32(TCGV_LOW(ret), t1);
+ tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ } else if (TCG_TARGET_HAS_bswap64_i64) {
+ tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
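+ /* Move byte k of arg to byte 7-k of the result: the low four bytes
+ are shifted up, the high four shifted down, each masked into place
+ and ORed into t0. */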
+ tcg_gen_shli_i64(t0, arg, 56);
+
+ tcg_gen_andi_i64(t1, arg, 0x0000ff00);
+ tcg_gen_shli_i64(t1, t1, 40);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_andi_i64(t1, arg, 0x00ff0000);
+ tcg_gen_shli_i64(t1, t1, 24);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_andi_i64(t1, arg, 0xff000000);
+ tcg_gen_shli_i64(t1, t1, 8);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 8);
+ tcg_gen_andi_i64(t1, t1, 0xff000000);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 24);
+ tcg_gen_andi_i64(t1, t1, 0x00ff0000);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 40);
+ tcg_gen_andi_i64(t1, t1, 0x0000ff00);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 56);
+ tcg_gen_or_i64(ret, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
+ } else if (TCG_TARGET_HAS_not_i64) {
+ tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
+ } else {
+ tcg_gen_xori_i64(ret, arg, -1);
+ }
+}
+
+void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+ } else if (TCG_TARGET_HAS_andc_i64) {
+ tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_not_i64(t0, arg2);
+ tcg_gen_and_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+ } else if (TCG_TARGET_HAS_eqv_i64) {
+ tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
+ } else {
+ tcg_gen_xor_i64(ret, arg1, arg2);
+ tcg_gen_not_i64(ret, ret);
+ }
+}
+
+void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+ } else if (TCG_TARGET_HAS_nand_i64) {
+ tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
+ } else {
+ tcg_gen_and_i64(ret, arg1, arg2);
+ tcg_gen_not_i64(ret, ret);
+ }
+}
+
+void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+ } else if (TCG_TARGET_HAS_nor_i64) {
+ tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
+ } else {
+ tcg_gen_or_i64(ret, arg1, arg2);
+ tcg_gen_not_i64(ret, ret);
+ }
+}
+
+void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+ } else if (TCG_TARGET_HAS_orc_i64) {
+ tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_not_i64(t0, arg2);
+ tcg_gen_or_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_rot_i64) {
+ tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
+ } else {
+ TCGv_i64 t0, t1;
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ tcg_gen_shl_i64(t0, arg1, arg2);
+ tcg_gen_subfi_i64(t1, 64, arg2);
+ tcg_gen_shr_i64(t1, arg1, t1);
+ tcg_gen_or_i64(ret, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 64);
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else if (TCG_TARGET_HAS_rot_i64) {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_rotl_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ } else {
+ TCGv_i64 t0, t1;
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ tcg_gen_shli_i64(t0, arg1, arg2);
+ tcg_gen_shri_i64(t1, arg1, 64 - arg2);
+ tcg_gen_or_i64(ret, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_rot_i64) {
+ tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
+ } else {
+ TCGv_i64 t0, t1;
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ tcg_gen_shr_i64(t0, arg1, arg2);
+ tcg_gen_subfi_i64(t1, 64, arg2);
+ tcg_gen_shl_i64(t1, arg1, t1);
+ tcg_gen_or_i64(ret, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+{
+ tcg_debug_assert(arg2 < 64);
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
+ }
+}
+
+void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
+ unsigned int ofs, unsigned int len)
+{
+ uint64_t mask;
+ TCGv_i64 t1;
+
+ tcg_debug_assert(ofs < 64);
+ tcg_debug_assert(len <= 64);
+ tcg_debug_assert(ofs + len <= 64);
+
+ if (ofs == 0 && len == 64) {
+ tcg_gen_mov_i64(ret, arg2);
+ return;
+ }
+ if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
+ tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
+ return;
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ if (ofs >= 32) {
+ tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
+ TCGV_LOW(arg2), ofs - 32, len);
+ tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
+ return;
+ }
+ if (ofs + len <= 32) {
+ tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1),
+ TCGV_LOW(arg2), ofs, len);
+ tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
+ return;
+ }
+ }
+
+ mask = (1ull << len) - 1;
+ t1 = tcg_temp_new_i64();
+
+ if (ofs + len < 64) {
+ tcg_gen_andi_i64(t1, arg2, mask);
+ tcg_gen_shli_i64(t1, t1, ofs);
+ } else {
+ tcg_gen_shli_i64(t1, arg2, ofs);
+ }
+ tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
+ tcg_gen_or_i64(ret, ret, t1);
+
+ tcg_temp_free_i64(t1);
+}
+
+void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
+ TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
+ TCGV_LOW(c1), TCGV_HIGH(c1),
+ TCGV_LOW(c2), TCGV_HIGH(c2), cond);
+
+ if (TCG_TARGET_HAS_movcond_i32) {
+ tcg_gen_movi_i32(t1, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1,
+ TCGV_LOW(v1), TCGV_LOW(v2));
+ tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1,
+ TCGV_HIGH(v1), TCGV_HIGH(v2));
+ } else {
+ tcg_gen_neg_i32(t0, t0);
+
+ tcg_gen_and_i32(t1, TCGV_LOW(v1), t0);
+ tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0);
+ tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1);
+
+ tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0);
+ tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0);
+ tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ } else if (TCG_TARGET_HAS_movcond_i64) {
+ tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(cond, t0, c1, c2);
+ tcg_gen_neg_i64(t0, t0);
+ tcg_gen_and_i64(t1, v1, t0);
+ tcg_gen_andc_i64(ret, v2, t0);
+ tcg_gen_or_i64(ret, ret, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
+{
+ if (TCG_TARGET_HAS_add2_i64) {
+ tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
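+ /* The low addition carries out exactly when the unsigned sum wraps,
+ i.e. when (al + bl) < al; add that 0/1 carry into the high half. */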
+ tcg_gen_add_i64(t0, al, bl);
+ tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
+ tcg_gen_add_i64(rh, ah, bh);
+ tcg_gen_add_i64(rh, rh, t1);
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
+{
+ if (TCG_TARGET_HAS_sub2_i64) {
+ tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_sub_i64(t0, al, bl);
+ tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
+ tcg_gen_sub_i64(rh, ah, bh);
+ tcg_gen_sub_i64(rh, rh, t1);
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+
+void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_mulu2_i64) {
+ tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
+ } else if (TCG_TARGET_HAS_muluh_i64) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
+ tcg_gen_mov_i64(rl, t);
+ tcg_temp_free_i64(t);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_mul_i64(t0, arg1, arg2);
+ gen_helper_muluh_i64(rh, arg1, arg2);
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_muls2_i64) {
+ tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
+ } else if (TCG_TARGET_HAS_mulsh_i64) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
+ tcg_gen_mov_i64(rl, t);
+ tcg_temp_free_i64(t);
+ } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
+ /* Adjust for negative inputs. */
+ tcg_gen_sari_i64(t2, arg1, 63);
+ tcg_gen_sari_i64(t3, arg2, 63);
+ tcg_gen_and_i64(t2, t2, arg2);
+ tcg_gen_and_i64(t3, t3, arg1);
+ tcg_gen_sub_i64(rh, t1, t2);
+ tcg_gen_sub_i64(rh, rh, t3);
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_mul_i64(t0, arg1, arg2);
+ gen_helper_mulsh_i64(rh, arg1, arg2);
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
+ }
+}
+
+/* Size changing operations. */
+
+void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg, unsigned count)
+{
+ tcg_debug_assert(count < 64);
+ if (TCG_TARGET_REG_BITS == 32) {
+ if (count >= 32) {
+ tcg_gen_shri_i32(ret, TCGV_HIGH(arg), count - 32);
+ } else if (count == 0) {
+ tcg_gen_mov_i32(ret, TCGV_LOW(arg));
+ } else {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t, arg, count);
+ tcg_gen_mov_i32(ret, TCGV_LOW(t));
+ tcg_temp_free_i64(t);
+ }
+ } else if (TCG_TARGET_HAS_trunc_shr_i32) {
+ tcg_gen_op3i_i32(INDEX_op_trunc_shr_i32, ret,
+ MAKE_TCGV_I32(GET_TCGV_I64(arg)), count);
+ } else if (count == 0) {
+ tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
+ } else {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t, arg, count);
+ tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
+ tcg_temp_free_i64(t);
+ }
+}
+
+void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_mov_i32(TCGV_LOW(ret), arg);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else {
+ /* Note: we assume the target supports move between
+ 32 and 64 bit registers. */
+ tcg_gen_ext32u_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
+ }
+}
+
+void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_mov_i32(TCGV_LOW(ret), arg);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+ } else {
+ /* Note: we assume the target supports move between
+ 32 and 64 bit registers. */
+ tcg_gen_ext32s_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
+ }
+}
+
+void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
+{
+ TCGv_i64 tmp;
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_mov_i32(TCGV_LOW(dest), low);
+ tcg_gen_mov_i32(TCGV_HIGH(dest), high);
+ return;
+ }
+
+ tmp = tcg_temp_new_i64();
+ /* These extensions are only needed for type correctness.
+ We may be able to do better given target-specific information. */
+ tcg_gen_extu_i32_i64(tmp, high);
+ tcg_gen_extu_i32_i64(dest, low);
+ /* If deposit is available, use it. Otherwise use the extra
+ knowledge that we have of the zero-extensions above. */
+ if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
+ tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
+ } else {
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(dest, dest, tmp);
+ }
+ tcg_temp_free_i64(tmp);
+}
+
+void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_mov_i32(lo, TCGV_LOW(arg));
+ tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
+ } else {
+ tcg_gen_trunc_shr_i64_i32(lo, arg, 0);
+ tcg_gen_trunc_shr_i64_i32(hi, arg, 32);
+ }
+}
+
+void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
+{
+ tcg_gen_ext32u_i64(lo, arg);
+ tcg_gen_shri_i64(hi, arg, 32);
+}
+
+/* QEMU specific operations. */
+
+void tcg_gen_goto_tb(unsigned idx)
+{
+ /* We only support two chained exits. */
+ tcg_debug_assert(idx <= 1);
+#ifdef CONFIG_DEBUG_TCG
+ /* Verify that we haven't seen this numbered exit before. */
+ tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0);
+ tcg_ctx.goto_tb_issue_mask |= 1 << idx;
+#endif
+ tcg_gen_op1i(INDEX_op_goto_tb, idx);
+}
+
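+/* Canonicalize a memory operation: byte accesses never need a byte swap,
+ a 32-bit value has no sign to extend when the destination is only
+ 32 bits wide, and stores never sign-extend. */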
+static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
+{
+ switch (op & MO_SIZE) {
+ case MO_8:
+ op &= ~MO_BSWAP;
+ break;
+ case MO_16:
+ break;
+ case MO_32:
+ if (!is64) {
+ op &= ~MO_SIGN;
+ }
+ break;
+ case MO_64:
+ if (!is64) {
+ tcg_abort();
+ }
+ break;
+ }
+ if (st) {
+ op &= ~MO_SIGN;
+ }
+ return op;
+}
+
+static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
+ TCGMemOp memop, TCGArg idx)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op4ii_i32(opc, val, addr, memop, idx);
+#else
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op5ii_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr),
+ memop, idx);
+ } else {
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr),
+ memop, idx);
+ }
+#endif
+}
+
+static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
+ TCGMemOp memop, TCGArg idx)
+{
+#if TARGET_LONG_BITS == 32
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op5ii_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
+ addr, memop, idx);
+ } else {
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr),
+ memop, idx);
+ }
+#else
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op6ii_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
+ TCGV_LOW(addr), TCGV_HIGH(addr), memop, idx);
+ } else {
+ tcg_gen_op4ii_i64(opc, val, addr, memop, idx);
+ }
+#endif
+}
+
+void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 0, 0);
+ gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
+}
+
+void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 0, 1);
+ gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
+}
+
+void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
+ tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
+ if (memop & MO_SIGN) {
+ tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
+ } else {
+ tcg_gen_movi_i32(TCGV_HIGH(val), 0);
+ }
+ return;
+ }
+
+ memop = tcg_canonicalize_memop(memop, 1, 0);
+ gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
+}
+
+void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
+ tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
+ return;
+ }
+
+ memop = tcg_canonicalize_memop(memop, 1, 1);
+ gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
+}
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 019dd9b6fb..96adf9af6a 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -21,359 +21,309 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
#include "tcg.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
-int gen_new_label(void);
+/* Basic output routines. Not for general consumption. */
+
+void tcg_gen_op1(TCGContext *, TCGOpcode, TCGArg);
+void tcg_gen_op2(TCGContext *, TCGOpcode, TCGArg, TCGArg);
+void tcg_gen_op3(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg);
+void tcg_gen_op4(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg);
+void tcg_gen_op5(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg,
+ TCGArg, TCGArg);
+void tcg_gen_op6(TCGContext *, TCGOpcode, TCGArg, TCGArg, TCGArg,
+ TCGArg, TCGArg, TCGArg);
-static inline void tcg_gen_op0(TCGOpcode opc)
-{
- *tcg_ctx.gen_opc_ptr++ = opc;
-}
-static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 arg1)
+static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+ tcg_gen_op1(&tcg_ctx, opc, GET_TCGV_I32(a1));
}
-static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 arg1)
+static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+ tcg_gen_op1(&tcg_ctx, opc, GET_TCGV_I64(a1));
}
-static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg arg1)
+static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = arg1;
+ tcg_gen_op1(&tcg_ctx, opc, a1);
}
-static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2)
+static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+ tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2));
}
-static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2)
+static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+ tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2));
}
-static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGArg arg2)
+static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 a1, TCGArg a2)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = arg2;
+ tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I32(a1), a2);
}
-static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGArg arg2)
+static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 a1, TCGArg a2)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = arg2;
+ tcg_gen_op2(&tcg_ctx, opc, GET_TCGV_I64(a1), a2);
}
-static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg arg1, TCGArg arg2)
+static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = arg1;
- *tcg_ctx.gen_opparam_ptr++ = arg2;
+ tcg_gen_op2(&tcg_ctx, opc, a1, a2);
}
-static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGv_i32 arg3)
+static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
+ TCGv_i32 a2, TCGv_i32 a3)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
+ tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(a1),
+ GET_TCGV_I32(a2), GET_TCGV_I32(a3));
}
-static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGv_i64 arg3)
+static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
+ TCGv_i64 a2, TCGv_i64 a3)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+ tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(a1),
+ GET_TCGV_I64(a2), GET_TCGV_I64(a3));
}
-static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 arg1,
- TCGv_i32 arg2, TCGArg arg3)
+static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
+ TCGv_i32 a2, TCGArg a3)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = arg3;
+ tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), a3);
}
-static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 arg1,
- TCGv_i64 arg2, TCGArg arg3)
+static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
+ TCGv_i64 a2, TCGArg a3)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = arg3;
+ tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), a3);
}
static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
TCGv_ptr base, TCGArg offset)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(val);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_PTR(base);
- *tcg_ctx.gen_opparam_ptr++ = offset;
+ tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_PTR(base), offset);
}
static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
TCGv_ptr base, TCGArg offset)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_PTR(base);
- *tcg_ctx.gen_opparam_ptr++ = offset;
+ tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_PTR(base), offset);
}
-static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGv_i32 arg3, TCGv_i32 arg4)
+static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGv_i32 a4)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg4);
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), GET_TCGV_I32(a4));
}
-static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGv_i64 arg3, TCGv_i64 arg4)
+static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGv_i64 a4)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg4);
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), GET_TCGV_I64(a4));
}
-static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGv_i32 arg3, TCGArg arg4)
+static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGArg a4)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = arg4;
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), a4);
}
-static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGv_i64 arg3, TCGArg arg4)
+static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGArg a4)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = arg4;
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), a4);
}
-static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGArg arg3, TCGArg arg4)
+static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGArg a3, TCGArg a4)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = arg3;
- *tcg_ctx.gen_opparam_ptr++ = arg4;
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), a3, a4);
}
-static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGArg arg3, TCGArg arg4)
+static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGArg a3, TCGArg a4)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = arg3;
- *tcg_ctx.gen_opparam_ptr++ = arg4;
+ tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), a3, a4);
}
-static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5)
+static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg4);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg5);
+ tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5));
}
-static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5)
+static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg4);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg5);
+ tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5));
}
-static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGv_i32 arg3, TCGv_i32 arg4, TCGArg arg5)
+static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGv_i32 a4, TCGArg a5)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg4);
- *tcg_ctx.gen_opparam_ptr++ = arg5;
+ tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), GET_TCGV_I32(a4), a5);
}
-static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGv_i64 arg3, TCGv_i64 arg4, TCGArg arg5)
+static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGv_i64 a4, TCGArg a5)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg4);
- *tcg_ctx.gen_opparam_ptr++ = arg5;
+ tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), GET_TCGV_I64(a4), a5);
}
-static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 arg1,
- TCGv_i32 arg2, TCGv_i32 arg3,
- TCGArg arg4, TCGArg arg5)
+static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGArg a4, TCGArg a5)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = arg4;
- *tcg_ctx.gen_opparam_ptr++ = arg5;
+ tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), a4, a5);
}
-static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 arg1,
- TCGv_i64 arg2, TCGv_i64 arg3,
- TCGArg arg4, TCGArg arg5)
+static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGArg a4, TCGArg a5)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = arg4;
- *tcg_ctx.gen_opparam_ptr++ = arg5;
+ tcg_gen_op5(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), a4, a5);
}
-static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5,
- TCGv_i32 arg6)
+static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGv_i32 a4,
+ TCGv_i32 a5, TCGv_i32 a6)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg4);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg5);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg6);
+ tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5),
+ GET_TCGV_I32(a6));
}
-static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5,
- TCGv_i64 arg6)
+static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGv_i64 a4,
+ TCGv_i64 a5, TCGv_i64 a6)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg4);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg5);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg6);
+ tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5),
+ GET_TCGV_I64(a6));
}
-static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
- TCGv_i32 arg3, TCGv_i32 arg4,
- TCGv_i32 arg5, TCGArg arg6)
+static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGv_i32 a4,
+ TCGv_i32 a5, TCGArg a6)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg4);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg5);
- *tcg_ctx.gen_opparam_ptr++ = arg6;
+ tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5), a6);
}
-static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2,
- TCGv_i64 arg3, TCGv_i64 arg4,
- TCGv_i64 arg5, TCGArg arg6)
+static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGv_i64 a4,
+ TCGv_i64 a5, TCGArg a6)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg4);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg5);
- *tcg_ctx.gen_opparam_ptr++ = arg6;
+ tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5), a6);
}
-static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 arg1,
- TCGv_i32 arg2, TCGv_i32 arg3,
- TCGv_i32 arg4, TCGArg arg5, TCGArg arg6)
+static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
+ TCGv_i32 a3, TCGv_i32 a4,
+ TCGArg a5, TCGArg a6)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(arg4);
- *tcg_ctx.gen_opparam_ptr++ = arg5;
- *tcg_ctx.gen_opparam_ptr++ = arg6;
+ tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
+ GET_TCGV_I32(a3), GET_TCGV_I32(a4), a5, a6);
}
-static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 arg1,
- TCGv_i64 arg2, TCGv_i64 arg3,
- TCGv_i64 arg4, TCGArg arg5, TCGArg arg6)
+static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
+ TCGv_i64 a3, TCGv_i64 a4,
+ TCGArg a5, TCGArg a6)
{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg1);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg2);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg3);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(arg4);
- *tcg_ctx.gen_opparam_ptr++ = arg5;
- *tcg_ctx.gen_opparam_ptr++ = arg6;
+ tcg_gen_op6(&tcg_ctx, opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
+ GET_TCGV_I64(a3), GET_TCGV_I64(a4), a5, a6);
}
-static inline void tcg_add_param_i32(TCGv_i32 val)
-{
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(val);
-}
-static inline void tcg_add_param_i64(TCGv_i64 val)
-{
-#if TCG_TARGET_REG_BITS == 32
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(TCGV_LOW(val));
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(TCGV_HIGH(val));
-#else
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
-#endif
-}
+/* Generic ops. */
+
+int gen_new_label(void);
static inline void gen_set_label(int n)
{
- tcg_gen_op1i(INDEX_op_set_label, n);
+ tcg_gen_op1(&tcg_ctx, INDEX_op_set_label, n);
}
static inline void tcg_gen_br(int label)
{
- tcg_gen_op1i(INDEX_op_br, label);
+ tcg_gen_op1(&tcg_ctx, INDEX_op_br, label);
+}
+
+
+/* Helper calls. */
+
+/* 32 bit ops */
+
+void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2);
+void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
+void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
+void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
+void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
+void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
+void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
+void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, int label);
+void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, int label);
+void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
+ TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
+void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
+void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
+void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
+
+static inline void tcg_gen_discard_i32(TCGv_i32 arg)
+{
+ tcg_gen_op1_i32(INDEX_op_discard, arg);
}
static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (!TCGV_EQUAL_I32(ret, arg))
+ if (!TCGV_EQUAL_I32(ret, arg)) {
tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
+ }
}
static inline void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
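The hunk above replaces every open-coded store sequence into tcg_ctx.gen_opc_ptr and tcg_ctx.gen_opparam_ptr with a call to a generic emitter (tcg_gen_op3 .. tcg_gen_op6 here, plus tcg_gen_op1 for labels and branches) that takes the TCGContext explicitly, so the typed wrappers shrink to one-line GET_TCGV_I32/GET_TCGV_I64 conversions. The emitters themselves are not part of this hunk; judging only from the removed inline bodies, a minimal sketch of the three-operand case would be:

    /* Sketch only: assumed to mirror the deleted inline wrappers, i.e. append
     * the opcode and its raw TCGArg operands to the generation buffers.  The
     * real definition introduced by this series is not shown in this diff. */
    void tcg_gen_op3(TCGContext *ctx, TCGOpcode opc,
                     TCGArg a1, TCGArg a2, TCGArg a3)
    {
        *ctx->gen_opc_ptr++ = opc;       /* record the opcode */
        *ctx->gen_opparam_ptr++ = a1;    /* then its three operands */
        *ctx->gen_opparam_ptr++ = a2;
        *ctx->gen_opparam_ptr++ = a3;
    }

With the argument marshalling shared in one place, the header keeps only the thin typed front ends seen in the added lines above.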
@@ -381,44 +331,50 @@ static inline void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
tcg_gen_op2i_i32(INDEX_op_movi_i32, ret, arg);
}
-/* 32 bit ops */
-
-static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
}
-static inline void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
}
-static inline void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
}
-static inline void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
}
-static inline void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
}
-static inline void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset);
}
-static inline void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset);
}
-static inline void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset);
}
@@ -428,126 +384,24 @@ static inline void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
}
-static inline void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_add_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
static inline void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
}
-static inline void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
-{
- TCGv_i32 t0 = tcg_const_i32(arg1);
- tcg_gen_sub_i32(ret, t0, arg2);
- tcg_temp_free_i32(t0);
-}
-
-static inline void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_sub_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCGV_EQUAL_I32(arg1, arg2)) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
-{
- TCGv_i32 t0;
- /* Some cases can be optimized here. */
- switch (arg2) {
- case 0:
- tcg_gen_movi_i32(ret, 0);
- return;
- case 0xffffffffu:
- tcg_gen_mov_i32(ret, arg1);
- return;
- case 0xffu:
- /* Don't recurse with tcg_gen_ext8u_i32. */
- if (TCG_TARGET_HAS_ext8u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
- return;
- }
- break;
- case 0xffffu:
- if (TCG_TARGET_HAS_ext16u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
- return;
- }
- break;
- }
- t0 = tcg_const_i32(arg2);
- tcg_gen_and_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
+ tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
}
static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCGV_EQUAL_I32(arg1, arg2)) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- /* Some cases can be optimized here. */
- if (arg2 == -1) {
- tcg_gen_movi_i32(ret, -1);
- } else if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_or_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
+ tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
}
static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCGV_EQUAL_I32(arg1, arg2)) {
- tcg_gen_movi_i32(ret, 0);
- } else {
- tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- /* Some cases can be optimized here. */
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
- /* Don't recurse with tcg_gen_not_i32. */
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_xor_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
+ tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
}
static inline void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
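This hunk drops the inline immediate-operand helpers (tcg_gen_addi_i32, tcg_gen_subfi_i32, tcg_gen_subi_i32, and the folding variants of andi/ori/xori) from the header; they are now only declared, further up, as ordinary functions. Their constant-folding special cases are presumably carried over unchanged into the out-of-line definitions; a sketch based on the body deleted above:

    /* Assumed shape of the new out-of-line definition; it simply restates the
     * inline version removed in this hunk and is not copied from tcg-op.c. */
    void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
    {
        if (arg2 == 0) {
            /* Adding zero folds to a plain move.  */
            tcg_gen_mov_i32(ret, arg1);
        } else {
            TCGv_i32 t0 = tcg_const_i32(arg2);
            tcg_gen_add_i32(ret, arg1, t0);
            tcg_temp_free_i32(t0);
        }
    }

Call sites in the target front ends are unchanged; only the inlining decision moves.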
@@ -555,449 +409,102 @@ static inline void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2);
}
-static inline void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_shl_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
static inline void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
}
-static inline void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_shr_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
static inline void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2);
}
-static inline void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_sar_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
-static inline void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1,
- TCGv_i32 arg2, int label_index)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_br(label_index);
- } else if (cond != TCG_COND_NEVER) {
- tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_index);
- }
-}
-
-static inline void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1,
- int32_t arg2, int label_index)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_br(label_index);
- } else if (cond != TCG_COND_NEVER) {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_brcond_i32(cond, arg1, t0, label_index);
- tcg_temp_free_i32(t0);
- }
-}
-
-static inline void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
- TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_movi_i32(ret, 1);
- } else if (cond == TCG_COND_NEVER) {
- tcg_gen_movi_i32(ret, 0);
- } else {
- tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
- }
-}
-
-static inline void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
- TCGv_i32 arg1, int32_t arg2)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_movi_i32(ret, 1);
- } else if (cond == TCG_COND_NEVER) {
- tcg_gen_movi_i32(ret, 0);
- } else {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_setcond_i32(cond, ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
static inline void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2);
}
-static inline void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_mul_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
-}
-
-static inline void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_div_i32) {
- tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i32) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_sari_i32(t0, arg1, 31);
- tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
- tcg_temp_free_i32(t0);
- } else {
- gen_helper_div_i32(ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_rem_i32) {
- tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i32) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
- tcg_gen_mul_i32(t0, t0, arg2);
- tcg_gen_sub_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- } else if (TCG_TARGET_HAS_div2_i32) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_sari_i32(t0, arg1, 31);
- tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
- tcg_temp_free_i32(t0);
- } else {
- gen_helper_rem_i32(ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_div_i32) {
- tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i32) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_movi_i32(t0, 0);
- tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
- tcg_temp_free_i32(t0);
- } else {
- gen_helper_divu_i32(ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_rem_i32) {
- tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i32) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
- tcg_gen_mul_i32(t0, t0, arg2);
- tcg_gen_sub_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- } else if (TCG_TARGET_HAS_div2_i32) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_movi_i32(t0, 0);
- tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
- tcg_temp_free_i32(t0);
- } else {
- gen_helper_remu_i32(ret, arg1, arg2);
- }
-}
-
-#if TCG_TARGET_REG_BITS == 32
-
-static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (!TCGV_EQUAL_I64(ret, arg)) {
- tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
- }
-}
-
-static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
-{
- tcg_gen_movi_i32(TCGV_LOW(ret), arg);
- tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
-}
-
-static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-}
-
-static inline void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), 31);
-}
-
-static inline void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-}
-
-static inline void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-}
-
-static inline void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-}
-
-static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-}
-
-static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- /* since arg2 and ret have different types, they cannot be the
- same temporary */
-#ifdef HOST_WORDS_BIGENDIAN
- tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
- tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
-#else
- tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
- tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
-#endif
-}
-
-static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset);
-}
-
-static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_st16_i32(TCGV_LOW(arg1), arg2, offset);
-}
-
-static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
-}
-
-static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
-#ifdef HOST_WORDS_BIGENDIAN
- tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
- tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
-#else
- tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
- tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
-#endif
-}
-
-static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op6_i32(INDEX_op_add2_i32, TCGV_LOW(ret), TCGV_HIGH(ret),
- TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2),
- TCGV_HIGH(arg2));
- /* Allow the optimizer room to replace add2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
-}
-
-static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op6_i32(INDEX_op_sub2_i32, TCGV_LOW(ret), TCGV_HIGH(ret),
- TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2),
- TCGV_HIGH(arg2));
- /* Allow the optimizer room to replace sub2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
-}
-
-static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-}
-
-static inline void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
- tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
-}
-
-static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-}
-
-static inline void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
- tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
-}
-
-static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-}
-
-static inline void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
- tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
-}
-
-/* XXX: use generic code when basic block handling is OK or CPU
- specific code (x86) */
-static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- gen_helper_shl_i64(ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
-}
-
-static inline void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- gen_helper_shr_i64(ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
-}
-
-static inline void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- gen_helper_sar_i64(ret, arg1, arg2);
-}
-
-static inline void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
-}
-
-static inline void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1,
- TCGv_i64 arg2, int label_index)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_br(label_index);
- } else if (cond != TCG_COND_NEVER) {
- tcg_gen_op6ii_i32(INDEX_op_brcond2_i32,
- TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2),
- TCGV_HIGH(arg2), cond, label_index);
- }
-}
-
-static inline void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
- TCGv_i64 arg1, TCGv_i64 arg2)
+static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_movi_i32(TCGV_LOW(ret), 1);
- } else if (cond == TCG_COND_NEVER) {
- tcg_gen_movi_i32(TCGV_LOW(ret), 0);
+ if (TCG_TARGET_HAS_neg_i32) {
+ tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
} else {
- tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
- TCGV_LOW(arg1), TCGV_HIGH(arg1),
- TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
+ tcg_gen_subfi_i32(ret, 0, arg);
}
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}
-static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- TCGv_i64 t0;
- TCGv_i32 t1;
-
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i32();
-
- if (TCG_TARGET_HAS_mulu2_i32) {
- tcg_gen_op4_i32(INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0),
- TCGV_LOW(arg1), TCGV_LOW(arg2));
- /* Allow the optimizer room to replace mulu2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
+ if (TCG_TARGET_HAS_not_i32) {
+ tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
} else {
- tcg_debug_assert(TCG_TARGET_HAS_muluh_i32);
- tcg_gen_op3_i32(INDEX_op_mul_i32, TCGV_LOW(t0),
- TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_op3_i32(INDEX_op_muluh_i32, TCGV_HIGH(t0),
- TCGV_LOW(arg1), TCGV_LOW(arg2));
+ tcg_gen_xori_i32(ret, arg, -1);
}
-
- tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
- tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
- tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
- tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
-
- tcg_gen_mov_i64(ret, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i32(t1);
}
-static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- gen_helper_div_i64(ret, arg1, arg2);
-}
-
-static inline void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- gen_helper_rem_i64(ret, arg1, arg2);
-}
-
-static inline void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- gen_helper_divu_i64(ret, arg1, arg2);
-}
+/* 64 bit ops */
+
+void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2);
+void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, int label);
+void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, int label);
+void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
+ TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
+void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
-static inline void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+#if TCG_TARGET_REG_BITS == 64
+static inline void tcg_gen_discard_i64(TCGv_i64 arg)
{
- gen_helper_remu_i64(ret, arg1, arg2);
+ tcg_gen_op1_i64(INDEX_op_discard, arg);
}
-#else
-
static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
- if (!TCGV_EQUAL_I64(ret, arg))
+ if (!TCGV_EQUAL_I64(ret, arg)) {
tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
+ }
}
static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
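The long hunk above also removes the inline brcond/brcondi/setcond/setcondi wrappers together with their TCG_COND_ALWAYS and TCG_COND_NEVER shortcuts, and rewrites the neg/not fallbacks in terms of tcg_gen_subfi_i32 and tcg_gen_xori_i32. Going by the deleted bodies, the out-of-line tcg_gen_brcondi_i32 is presumably along these lines:

    /* Sketch assuming the out-of-line code keeps the removed inline logic;
     * the actual definition is outside this diff. */
    void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1,
                             int32_t arg2, int label)
    {
        if (cond == TCG_COND_ALWAYS) {
            tcg_gen_br(label);                 /* branch unconditionally */
        } else if (cond != TCG_COND_NEVER) {
            TCGv_i32 t0 = tcg_const_i32(arg2);
            tcg_gen_brcond_i32(cond, arg1, t0, label);
            tcg_temp_free_i32(t0);
        }
        /* TCG_COND_NEVER generates no code at all. */
    }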
@@ -1041,7 +548,8 @@ static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2,
tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset);
}
-static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
}
@@ -1064,7 +572,8 @@ static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset);
}
-static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
+static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
+ tcg_target_long offset)
{
tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset);
}
@@ -1081,94 +590,17 @@ static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCGV_EQUAL_I64(arg1, arg2)) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
-{
- TCGv_i64 t0;
- /* Some cases can be optimized here. */
- switch (arg2) {
- case 0:
- tcg_gen_movi_i64(ret, 0);
- return;
- case 0xffffffffffffffffull:
- tcg_gen_mov_i64(ret, arg1);
- return;
- case 0xffull:
- /* Don't recurse with tcg_gen_ext8u_i32. */
- if (TCG_TARGET_HAS_ext8u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
- return;
- }
- break;
- case 0xffffu:
- if (TCG_TARGET_HAS_ext16u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
- return;
- }
- break;
- case 0xffffffffull:
- if (TCG_TARGET_HAS_ext32u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
- return;
- }
- break;
- }
- t0 = tcg_const_i64(arg2);
- tcg_gen_and_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
}
static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCGV_EQUAL_I64(arg1, arg2)) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- /* Some cases can be optimized here. */
- if (arg2 == -1) {
- tcg_gen_movi_i64(ret, -1);
- } else if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_or_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
+ tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
}
static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCGV_EQUAL_I64(arg1, arg2)) {
- tcg_gen_movi_i64(ret, 0);
- } else {
- tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- /* Some cases can be optimized here. */
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
- /* Don't recurse with tcg_gen_not_i64. */
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
- } else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_xor_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
+ tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
}
static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
@@ -1176,993 +608,92 @@ static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2);
}
-static inline void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_shl_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
-}
-
static inline void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2);
}
-static inline void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_shr_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
-}
-
static inline void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
tcg_gen_op3_i64(INDEX_op_sar_i64, ret, arg1, arg2);
}
-static inline void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_sar_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
-}
-
-static inline void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1,
- TCGv_i64 arg2, int label_index)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_br(label_index);
- } else if (cond != TCG_COND_NEVER) {
- tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label_index);
- }
-}
-
-static inline void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
- TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_movi_i64(ret, 1);
- } else if (cond == TCG_COND_NEVER) {
- tcg_gen_movi_i64(ret, 0);
- } else {
- tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
- }
-}
-
static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2);
}
-
-static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (TCG_TARGET_HAS_div_i64) {
- tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_sari_i64(t0, arg1, 63);
- tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
- tcg_temp_free_i64(t0);
- } else {
- gen_helper_div_i64(ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (TCG_TARGET_HAS_rem_i64) {
- tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
- tcg_gen_mul_i64(t0, t0, arg2);
- tcg_gen_sub_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- } else if (TCG_TARGET_HAS_div2_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_sari_i64(t0, arg1, 63);
- tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
- tcg_temp_free_i64(t0);
- } else {
- gen_helper_rem_i64(ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (TCG_TARGET_HAS_div_i64) {
- tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div2_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_movi_i64(t0, 0);
- tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
- tcg_temp_free_i64(t0);
- } else {
- gen_helper_divu_i64(ret, arg1, arg2);
- }
-}
-
-static inline void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (TCG_TARGET_HAS_rem_i64) {
- tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_div_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
- tcg_gen_mul_i64(t0, t0, arg2);
- tcg_gen_sub_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- } else if (TCG_TARGET_HAS_div2_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_movi_i64(t0, 0);
- tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
- tcg_temp_free_i64(t0);
- } else {
- gen_helper_remu_i64(ret, arg1, arg2);
- }
-}
-#endif /* TCG_TARGET_REG_BITS == 32 */
-
-static inline void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_add_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
-}
-
-static inline void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
-{
- TCGv_i64 t0 = tcg_const_i64(arg1);
- tcg_gen_sub_i64(ret, t0, arg2);
- tcg_temp_free_i64(t0);
-}
-
-static inline void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_sub_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
-}
-static inline void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1,
- int64_t arg2, int label_index)
-{
- if (cond == TCG_COND_ALWAYS) {
- tcg_gen_br(label_index);
- } else if (cond != TCG_COND_NEVER) {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_brcond_i64(cond, arg1, t0, label_index);
- tcg_temp_free_i64(t0);
- }
-}
-
-static inline void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
- TCGv_i64 arg1, int64_t arg2)
-{
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_setcond_i64(cond, ret, arg1, t0);
- tcg_temp_free_i64(t0);
-}
-
-static inline void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_mul_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
-}
-
-
-/***************************************/
-/* optional operations */
-
-static inline void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_ext8s_i32) {
- tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
- } else {
- tcg_gen_shli_i32(ret, arg, 24);
- tcg_gen_sari_i32(ret, ret, 24);
- }
-}
-
-static inline void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_ext16s_i32) {
- tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
- } else {
- tcg_gen_shli_i32(ret, arg, 16);
- tcg_gen_sari_i32(ret, ret, 16);
- }
-}
-
-static inline void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_ext8u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
- } else {
- tcg_gen_andi_i32(ret, arg, 0xffu);
- }
-}
-
-static inline void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_ext16u_i32) {
- tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
- } else {
- tcg_gen_andi_i32(ret, arg, 0xffffu);
- }
-}
-
-/* Note: we assume the two high bytes are set to zero */
-static inline void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_bswap16_i32) {
- tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
- } else {
- TCGv_i32 t0 = tcg_temp_new_i32();
-
- tcg_gen_ext8u_i32(t0, arg);
- tcg_gen_shli_i32(t0, t0, 8);
- tcg_gen_shri_i32(ret, arg, 8);
- tcg_gen_or_i32(ret, ret, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
-static inline void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_bswap32_i32) {
- tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
- } else {
- TCGv_i32 t0, t1;
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
-
- tcg_gen_shli_i32(t0, arg, 24);
-
- tcg_gen_andi_i32(t1, arg, 0x0000ff00);
- tcg_gen_shli_i32(t1, t1, 8);
- tcg_gen_or_i32(t0, t0, t1);
-
- tcg_gen_shri_i32(t1, arg, 8);
- tcg_gen_andi_i32(t1, t1, 0x0000ff00);
- tcg_gen_or_i32(t0, t0, t1);
-
- tcg_gen_shri_i32(t1, arg, 24);
- tcg_gen_or_i32(ret, t0, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
-}
-
-#if TCG_TARGET_REG_BITS == 32
-static inline void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-}
-
-static inline void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-}
-
-static inline void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-}
-
-static inline void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-}
-
-static inline void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-}
-
-static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-}
-
-static inline void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg,
- unsigned int count)
-{
- tcg_debug_assert(count < 64);
- if (count >= 32) {
- tcg_gen_shri_i32(ret, TCGV_HIGH(arg), count - 32);
- } else if (count == 0) {
- tcg_gen_mov_i32(ret, TCGV_LOW(arg));
- } else {
- TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_shri_i64(t, arg, count);
- tcg_gen_mov_i32(ret, TCGV_LOW(t));
- tcg_temp_free_i64(t);
- }
-}
-
-static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
-{
- tcg_gen_mov_i32(TCGV_LOW(ret), arg);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-}
-
-static inline void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
-{
- tcg_gen_mov_i32(TCGV_LOW(ret), arg);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-}
-
-/* Note: we assume the six high bytes are set to zero */
-static inline void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
- tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-}
-
-/* Note: we assume the four high bytes are set to zero */
-static inline void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
- tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-}
-
-static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- TCGv_i32 t0, t1;
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
-
- tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
- tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
- tcg_gen_mov_i32(TCGV_LOW(ret), t1);
- tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
-}
-#else
-
-static inline void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (TCG_TARGET_HAS_ext8s_i64) {
- tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
- } else {
- tcg_gen_shli_i64(ret, arg, 56);
- tcg_gen_sari_i64(ret, ret, 56);
- }
-}
-
-static inline void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (TCG_TARGET_HAS_ext16s_i64) {
- tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
- } else {
- tcg_gen_shli_i64(ret, arg, 48);
- tcg_gen_sari_i64(ret, ret, 48);
- }
-}
-
-static inline void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (TCG_TARGET_HAS_ext32s_i64) {
- tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
- } else {
- tcg_gen_shli_i64(ret, arg, 32);
- tcg_gen_sari_i64(ret, ret, 32);
- }
-}
-
-static inline void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (TCG_TARGET_HAS_ext8u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
- } else {
- tcg_gen_andi_i64(ret, arg, 0xffu);
- }
-}
-
-static inline void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (TCG_TARGET_HAS_ext16u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
- } else {
- tcg_gen_andi_i64(ret, arg, 0xffffu);
- }
-}
-
-static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (TCG_TARGET_HAS_ext32u_i64) {
- tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
- } else {
- tcg_gen_andi_i64(ret, arg, 0xffffffffu);
- }
-}
-
-static inline void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg,
- unsigned int count)
-{
- tcg_debug_assert(count < 64);
- if (TCG_TARGET_HAS_trunc_shr_i32) {
- tcg_gen_op3i_i32(INDEX_op_trunc_shr_i32, ret,
- MAKE_TCGV_I32(GET_TCGV_I64(arg)), count);
- } else if (count == 0) {
- tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
- } else {
- TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_shri_i64(t, arg, count);
- tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
- tcg_temp_free_i64(t);
- }
-}
-
-/* Note: we assume the target supports move between 32 and 64 bit
- registers */
-static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
-{
- tcg_gen_ext32u_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
-}
-
-/* Note: we assume the target supports move between 32 and 64 bit
- registers */
-static inline void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
+#else /* TCG_TARGET_REG_BITS == 32 */
+static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
+ tcg_target_long offset)
{
- tcg_gen_ext32s_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
+ tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset);
}
-/* Note: we assume the six high bytes are set to zero */
-static inline void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
+static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2,
+ tcg_target_long offset)
{
- if (TCG_TARGET_HAS_bswap16_i64) {
- tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
-
- tcg_gen_ext8u_i64(t0, arg);
- tcg_gen_shli_i64(t0, t0, 8);
- tcg_gen_shri_i64(ret, arg, 8);
- tcg_gen_or_i64(ret, ret, t0);
- tcg_temp_free_i64(t0);
- }
+ tcg_gen_st16_i32(TCGV_LOW(arg1), arg2, offset);
}
-/* Note: we assume the four high bytes are set to zero */
-static inline void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
+static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
+ tcg_target_long offset)
{
- if (TCG_TARGET_HAS_bswap32_i64) {
- tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
- } else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
-
- tcg_gen_shli_i64(t0, arg, 24);
- tcg_gen_ext32u_i64(t0, t0);
-
- tcg_gen_andi_i64(t1, arg, 0x0000ff00);
- tcg_gen_shli_i64(t1, t1, 8);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 8);
- tcg_gen_andi_i64(t1, t1, 0x0000ff00);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 24);
- tcg_gen_or_i64(ret, t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
+ tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
}
-static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
+static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_bswap64_i64) {
- tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
-
- tcg_gen_shli_i64(t0, arg, 56);
-
- tcg_gen_andi_i64(t1, arg, 0x0000ff00);
- tcg_gen_shli_i64(t1, t1, 40);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_andi_i64(t1, arg, 0x00ff0000);
- tcg_gen_shli_i64(t1, t1, 24);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_andi_i64(t1, arg, 0xff000000);
- tcg_gen_shli_i64(t1, t1, 8);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 8);
- tcg_gen_andi_i64(t1, t1, 0xff000000);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 24);
- tcg_gen_andi_i64(t1, t1, 0x00ff0000);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 40);
- tcg_gen_andi_i64(t1, t1, 0x0000ff00);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 56);
- tcg_gen_or_i64(ret, t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
+ tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
+ TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
}
-#endif
-
-static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
+static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_neg_i32) {
- tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
- } else {
- TCGv_i32 t0 = tcg_const_i32(0);
- tcg_gen_sub_i32(ret, t0, arg);
- tcg_temp_free_i32(t0);
- }
-}
+ tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
+ TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
+}
+
+void tcg_gen_discard_i64(TCGv_i64 arg);
+void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
+void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+#endif /* TCG_TARGET_REG_BITS */
static inline void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
{
if (TCG_TARGET_HAS_neg_i64) {
tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg);
} else {
- TCGv_i64 t0 = tcg_const_i64(0);
- tcg_gen_sub_i64(ret, t0, arg);
- tcg_temp_free_i64(t0);
- }
-}
-
-static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_not_i32) {
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
- } else {
- tcg_gen_xori_i32(ret, arg, -1);
- }
-}
-
-static inline void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
-#if TCG_TARGET_REG_BITS == 64
- if (TCG_TARGET_HAS_not_i64) {
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
- } else {
- tcg_gen_xori_i64(ret, arg, -1);
- }
-#else
- tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
- tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
-#endif
-}
-
-static inline void tcg_gen_discard_i32(TCGv_i32 arg)
-{
- tcg_gen_op1_i32(INDEX_op_discard, arg);
-}
-
-static inline void tcg_gen_discard_i64(TCGv_i64 arg)
-{
-#if TCG_TARGET_REG_BITS == 32
- tcg_gen_discard_i32(TCGV_LOW(arg));
- tcg_gen_discard_i32(TCGV_HIGH(arg));
-#else
- tcg_gen_op1_i64(INDEX_op_discard, arg);
-#endif
-}
-
-static inline void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_andc_i32) {
- tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
- } else {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_not_i32(t0, arg2);
- tcg_gen_and_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
-static inline void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-#if TCG_TARGET_REG_BITS == 64
- if (TCG_TARGET_HAS_andc_i64) {
- tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_not_i64(t0, arg2);
- tcg_gen_and_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
-#else
- tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-#endif
-}
-
-static inline void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_eqv_i32) {
- tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
- } else {
- tcg_gen_xor_i32(ret, arg1, arg2);
- tcg_gen_not_i32(ret, ret);
- }
-}
-
-static inline void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-#if TCG_TARGET_REG_BITS == 64
- if (TCG_TARGET_HAS_eqv_i64) {
- tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
- } else {
- tcg_gen_xor_i64(ret, arg1, arg2);
- tcg_gen_not_i64(ret, ret);
- }
-#else
- tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-#endif
-}
-
-static inline void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_nand_i32) {
- tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
- } else {
- tcg_gen_and_i32(ret, arg1, arg2);
- tcg_gen_not_i32(ret, ret);
- }
-}
-
-static inline void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-#if TCG_TARGET_REG_BITS == 64
- if (TCG_TARGET_HAS_nand_i64) {
- tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
- } else {
- tcg_gen_and_i64(ret, arg1, arg2);
- tcg_gen_not_i64(ret, ret);
- }
-#else
- tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-#endif
-}
-
-static inline void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_nor_i32) {
- tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
- } else {
- tcg_gen_or_i32(ret, arg1, arg2);
- tcg_gen_not_i32(ret, ret);
- }
-}
-
-static inline void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-#if TCG_TARGET_REG_BITS == 64
- if (TCG_TARGET_HAS_nor_i64) {
- tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
- } else {
- tcg_gen_or_i64(ret, arg1, arg2);
- tcg_gen_not_i64(ret, ret);
- }
-#else
- tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-#endif
-}
-
-static inline void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_orc_i32) {
- tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
- } else {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_not_i32(t0, arg2);
- tcg_gen_or_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- }
-}
-
-static inline void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-#if TCG_TARGET_REG_BITS == 64
- if (TCG_TARGET_HAS_orc_i64) {
- tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_not_i64(t0, arg2);
- tcg_gen_or_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- }
-#else
- tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
- tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-#endif
-}
-
-static inline void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_rot_i32) {
- tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
- } else {
- TCGv_i32 t0, t1;
-
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- tcg_gen_shl_i32(t0, arg1, arg2);
- tcg_gen_subfi_i32(t1, 32, arg2);
- tcg_gen_shr_i32(t1, arg1, t1);
- tcg_gen_or_i32(ret, t0, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
+ tcg_gen_subfi_i64(ret, 0, arg);
}
}
-static inline void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (TCG_TARGET_HAS_rot_i64) {
- tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
- } else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
- tcg_gen_shl_i64(t0, arg1, arg2);
- tcg_gen_subfi_i64(t1, 64, arg2);
- tcg_gen_shr_i64(t1, arg1, t1);
- tcg_gen_or_i64(ret, t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
+/* Size changing operations. */
-static inline void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else if (TCG_TARGET_HAS_rot_i32) {
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_rotl_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
- } else {
- TCGv_i32 t0, t1;
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- tcg_gen_shli_i32(t0, arg1, arg2);
- tcg_gen_shri_i32(t1, arg1, 32 - arg2);
- tcg_gen_or_i32(ret, t0, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
-}
+void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
+void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
+void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
+void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg, unsigned int c);
+void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
+void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
-static inline void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else if (TCG_TARGET_HAS_rot_i64) {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_rotl_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
- } else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
- tcg_gen_shli_i64(t0, arg1, arg2);
- tcg_gen_shri_i64(t1, arg1, 64 - arg2);
- tcg_gen_or_i64(ret, t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_rot_i32) {
- tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
- } else {
- TCGv_i32 t0, t1;
-
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- tcg_gen_shr_i32(t0, arg1, arg2);
- tcg_gen_subfi_i32(t1, 32, arg2);
- tcg_gen_shl_i32(t1, arg1, t1);
- tcg_gen_or_i32(ret, t0, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
-}
-
-static inline void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (TCG_TARGET_HAS_rot_i64) {
- tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
- } else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
- tcg_gen_shr_i64(t0, arg1, arg2);
- tcg_gen_subfi_i64(t1, 64, arg2);
- tcg_gen_shl_i64(t1, arg1, t1);
- tcg_gen_or_i64(ret, t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
-{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i32(ret, arg1);
- } else {
- tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
- }
-}
-
-static inline void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
-{
- /* some cases can be optimized here */
- if (arg2 == 0) {
- tcg_gen_mov_i64(ret, arg1);
- } else {
- tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
- }
-}
-
-static inline void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1,
- TCGv_i32 arg2, unsigned int ofs,
- unsigned int len)
-{
- uint32_t mask;
- TCGv_i32 t1;
-
- tcg_debug_assert(ofs < 32);
- tcg_debug_assert(len <= 32);
- tcg_debug_assert(ofs + len <= 32);
-
- if (ofs == 0 && len == 32) {
- tcg_gen_mov_i32(ret, arg2);
- return;
- }
- if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
- tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
- return;
- }
-
- mask = (1u << len) - 1;
- t1 = tcg_temp_new_i32();
-
- if (ofs + len < 32) {
- tcg_gen_andi_i32(t1, arg2, mask);
- tcg_gen_shli_i32(t1, t1, ofs);
- } else {
- tcg_gen_shli_i32(t1, arg2, ofs);
- }
- tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
- tcg_gen_or_i32(ret, ret, t1);
-
- tcg_temp_free_i32(t1);
-}
-
-static inline void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1,
- TCGv_i64 arg2, unsigned int ofs,
- unsigned int len)
-{
- uint64_t mask;
- TCGv_i64 t1;
-
- tcg_debug_assert(ofs < 64);
- tcg_debug_assert(len <= 64);
- tcg_debug_assert(ofs + len <= 64);
-
- if (ofs == 0 && len == 64) {
- tcg_gen_mov_i64(ret, arg2);
- return;
- }
- if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
- tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
- return;
- }
-
-#if TCG_TARGET_REG_BITS == 32
- if (ofs >= 32) {
- tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
- TCGV_LOW(arg2), ofs - 32, len);
- tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
- return;
- }
- if (ofs + len <= 32) {
- tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1),
- TCGV_LOW(arg2), ofs, len);
- tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
- return;
- }
-#endif
-
- mask = (1ull << len) - 1;
- t1 = tcg_temp_new_i64();
-
- if (ofs + len < 64) {
- tcg_gen_andi_i64(t1, arg2, mask);
- tcg_gen_shli_i64(t1, t1, ofs);
- } else {
- tcg_gen_shli_i64(t1, arg2, ofs);
- }
- tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
- tcg_gen_or_i64(ret, ret, t1);
-
- tcg_temp_free_i64(t1);
-}
-
-static inline void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low,
- TCGv_i32 high)
-{
-#if TCG_TARGET_REG_BITS == 32
- tcg_gen_mov_i32(TCGV_LOW(dest), low);
- tcg_gen_mov_i32(TCGV_HIGH(dest), high);
-#else
- TCGv_i64 tmp = tcg_temp_new_i64();
- /* These extensions are only needed for type correctness.
- We may be able to do better given target specific information. */
- tcg_gen_extu_i32_i64(tmp, high);
- tcg_gen_extu_i32_i64(dest, low);
- /* If deposit is available, use it. Otherwise use the extra
- knowledge that we have of the zero-extensions above. */
- if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
- tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
- } else {
- tcg_gen_shli_i64(tmp, tmp, 32);
- tcg_gen_or_i64(dest, dest, tmp);
- }
- tcg_temp_free_i64(tmp);
-#endif
-}
-
-static inline void tcg_gen_concat32_i64(TCGv_i64 dest, TCGv_i64 low,
- TCGv_i64 high)
-{
- tcg_gen_deposit_i64(dest, low, high, 32, 32);
+ tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
}
static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
@@ -2170,298 +701,30 @@ static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
tcg_gen_trunc_shr_i64_i32(ret, arg, 0);
}
-static inline void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
-{
- tcg_gen_trunc_shr_i64_i32(lo, arg, 0);
- tcg_gen_trunc_shr_i64_i32(hi, arg, 32);
-}
-
-static inline void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
-{
- tcg_gen_ext32u_i64(lo, arg);
- tcg_gen_shri_i64(hi, arg, 32);
-}
+/* QEMU specific operations. */
-static inline void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret,
- TCGv_i32 c1, TCGv_i32 c2,
- TCGv_i32 v1, TCGv_i32 v2)
-{
- if (TCG_TARGET_HAS_movcond_i32) {
- tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
- } else {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- tcg_gen_setcond_i32(cond, t0, c1, c2);
- tcg_gen_neg_i32(t0, t0);
- tcg_gen_and_i32(t1, v1, t0);
- tcg_gen_andc_i32(ret, v2, t0);
- tcg_gen_or_i32(ret, ret, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
-}
+#ifndef TARGET_LONG_BITS
+#error must include QEMU headers
+#endif
-static inline void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret,
- TCGv_i64 c1, TCGv_i64 c2,
- TCGv_i64 v1, TCGv_i64 v2)
+/* debug info: write the PC of the corresponding QEMU CPU instruction */
+static inline void tcg_gen_debug_insn_start(uint64_t pc)
{
-#if TCG_TARGET_REG_BITS == 32
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
- TCGV_LOW(c1), TCGV_HIGH(c1),
- TCGV_LOW(c2), TCGV_HIGH(c2), cond);
-
- if (TCG_TARGET_HAS_movcond_i32) {
- tcg_gen_movi_i32(t1, 0);
- tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1,
- TCGV_LOW(v1), TCGV_LOW(v2));
- tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1,
- TCGV_HIGH(v1), TCGV_HIGH(v2));
- } else {
- tcg_gen_neg_i32(t0, t0);
-
- tcg_gen_and_i32(t1, TCGV_LOW(v1), t0);
- tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0);
- tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1);
-
- tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0);
- tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0);
- tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1);
- }
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
+ /* XXX: must really use a 32 bit size for TCGArg in all cases */
+#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+ tcg_gen_op2ii(INDEX_op_debug_insn_start,
+ (uint32_t)(pc), (uint32_t)(pc >> 32));
#else
- if (TCG_TARGET_HAS_movcond_i64) {
- tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_setcond_i64(cond, t0, c1, c2);
- tcg_gen_neg_i64(t0, t0);
- tcg_gen_and_i64(t1, v1, t0);
- tcg_gen_andc_i64(ret, v2, t0);
- tcg_gen_or_i64(ret, ret, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
+ tcg_gen_op1i(INDEX_op_debug_insn_start, pc);
#endif
}
-static inline void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
- TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
-{
- if (TCG_TARGET_HAS_add2_i32) {
- tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
- /* Allow the optimizer room to replace add2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_concat_i32_i64(t0, al, ah);
- tcg_gen_concat_i32_i64(t1, bl, bh);
- tcg_gen_add_i64(t0, t0, t1);
- tcg_gen_extr_i64_i32(rl, rh, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
- TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
-{
- if (TCG_TARGET_HAS_sub2_i32) {
- tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
- /* Allow the optimizer room to replace sub2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_concat_i32_i64(t0, al, ah);
- tcg_gen_concat_i32_i64(t1, bl, bh);
- tcg_gen_sub_i64(t0, t0, t1);
- tcg_gen_extr_i64_i32(rl, rh, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh,
- TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_mulu2_i32) {
- tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
- /* Allow the optimizer room to replace mulu2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else if (TCG_TARGET_HAS_muluh_i32) {
- TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
- tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
- tcg_gen_mov_i32(rl, t);
- tcg_temp_free_i32(t);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(t0, arg1);
- tcg_gen_extu_i32_i64(t1, arg2);
- tcg_gen_mul_i64(t0, t0, t1);
- tcg_gen_extr_i64_i32(rl, rh, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh,
- TCGv_i32 arg1, TCGv_i32 arg2)
-{
- if (TCG_TARGET_HAS_muls2_i32) {
- tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
- /* Allow the optimizer room to replace muls2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else if (TCG_TARGET_HAS_mulsh_i32) {
- TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
- tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
- tcg_gen_mov_i32(rl, t);
- tcg_temp_free_i32(t);
- } else if (TCG_TARGET_REG_BITS == 32) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
- TCGv_i32 t3 = tcg_temp_new_i32();
- tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
- /* Adjust for negative inputs. */
- tcg_gen_sari_i32(t2, arg1, 31);
- tcg_gen_sari_i32(t3, arg2, 31);
- tcg_gen_and_i32(t2, t2, arg2);
- tcg_gen_and_i32(t3, t3, arg1);
- tcg_gen_sub_i32(rh, t1, t2);
- tcg_gen_sub_i32(rh, rh, t3);
- tcg_gen_mov_i32(rl, t0);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_ext_i32_i64(t0, arg1);
- tcg_gen_ext_i32_i64(t1, arg2);
- tcg_gen_mul_i64(t0, t0, t1);
- tcg_gen_extr_i64_i32(rl, rh, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
-{
- if (TCG_TARGET_HAS_add2_i64) {
- tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
- /* Allow the optimizer room to replace add2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_add_i64(t0, al, bl);
- tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
- tcg_gen_add_i64(rh, ah, bh);
- tcg_gen_add_i64(rh, rh, t1);
- tcg_gen_mov_i64(rl, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
-{
- if (TCG_TARGET_HAS_sub2_i64) {
- tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
- /* Allow the optimizer room to replace sub2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_sub_i64(t0, al, bl);
- tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
- tcg_gen_sub_i64(rh, ah, bh);
- tcg_gen_sub_i64(rh, rh, t1);
- tcg_gen_mov_i64(rl, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
-}
-
-static inline void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh,
- TCGv_i64 arg1, TCGv_i64 arg2)
+static inline void tcg_gen_exit_tb(uintptr_t val)
{
- if (TCG_TARGET_HAS_mulu2_i64) {
- tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
- /* Allow the optimizer room to replace mulu2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else if (TCG_TARGET_HAS_muluh_i64) {
- TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
- tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
- tcg_gen_mov_i64(rl, t);
- tcg_temp_free_i64(t);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_mul_i64(t0, arg1, arg2);
- gen_helper_muluh_i64(rh, arg1, arg2);
- tcg_gen_mov_i64(rl, t0);
- tcg_temp_free_i64(t0);
- }
+ tcg_gen_op1i(INDEX_op_exit_tb, val);
}
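
A minimal sketch of how a front end typically pairs tcg_gen_goto_tb() with tcg_gen_exit_tb() (use_goto_tb, tb, tb_num, dest and gen_update_pc are illustrative names, not part of this patch):

    if (use_goto_tb) {
        /* Chain directly to the next TB: emit the goto_tb slot, update the
           guest PC, then exit with (uintptr_t)tb + tb_num so the main loop
           knows which jump slot to patch. */
        tcg_gen_goto_tb(tb_num);
        gen_update_pc(dest);                      /* hypothetical helper */
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        gen_update_pc(dest);
        tcg_gen_exit_tb(0);                       /* no chaining */
    }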
-static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
- TCGv_i64 arg1, TCGv_i64 arg2)
-{
- if (TCG_TARGET_HAS_muls2_i64) {
- tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
- /* Allow the optimizer room to replace muls2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- } else if (TCG_TARGET_HAS_mulsh_i64) {
- TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
- tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
- tcg_gen_mov_i64(rl, t);
- tcg_temp_free_i64(t);
- } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
- TCGv_i64 t3 = tcg_temp_new_i64();
- tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
- /* Adjust for negative inputs. */
- tcg_gen_sari_i64(t2, arg1, 63);
- tcg_gen_sari_i64(t3, arg2, 63);
- tcg_gen_and_i64(t2, t2, arg2);
- tcg_gen_and_i64(t3, t3, arg1);
- tcg_gen_sub_i64(rh, t1, t2);
- tcg_gen_sub_i64(rh, rh, t3);
- tcg_gen_mov_i64(rl, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
- } else {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_mul_i64(t0, arg1, arg2);
- gen_helper_mulsh_i64(rh, arg1, arg2);
- tcg_gen_mov_i64(rl, t0);
- tcg_temp_free_i64(t0);
- }
-}
-
-/***************************************/
-/* QEMU specific operations. Their type depend on the QEMU CPU
- type. */
-#ifndef TARGET_LONG_BITS
-#error must include QEMU headers
-#endif
+void tcg_gen_goto_tb(unsigned idx);
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
@@ -2473,7 +736,6 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b)
-#define tcg_add_param_tl tcg_add_param_i32
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
@@ -2486,41 +748,10 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b)
-#define tcg_add_param_tl tcg_add_param_i64
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
#endif
-/* debug info: write the PC of the corresponding QEMU CPU instruction */
-static inline void tcg_gen_debug_insn_start(uint64_t pc)
-{
- /* XXX: must really use a 32 bit size for TCGArg in all cases */
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
- tcg_gen_op2ii(INDEX_op_debug_insn_start,
- (uint32_t)(pc), (uint32_t)(pc >> 32));
-#else
- tcg_gen_op1i(INDEX_op_debug_insn_start, pc);
-#endif
-}
-
-static inline void tcg_gen_exit_tb(uintptr_t val)
-{
- tcg_gen_op1i(INDEX_op_exit_tb, val);
-}
-
-static inline void tcg_gen_goto_tb(unsigned idx)
-{
- /* We only support two chained exits. */
- tcg_debug_assert(idx <= 1);
-#ifdef CONFIG_DEBUG_TCG
- /* Verify that we havn't seen this numbered exit before. */
- tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0);
- tcg_ctx.goto_tb_issue_mask |= 1 << idx;
-#endif
- tcg_gen_op1i(INDEX_op_goto_tb, idx);
-}
-
-
void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
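
A minimal usage sketch for the guest load/store generators declared above (addr and mmu_idx are illustrative; MO_TEUL selects a target-endian unsigned 32-bit access):

    TCGv_i32 val = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(val, addr, mmu_idx, MO_TEUL);   /* guest 32-bit load */
    /* ... consume val ... */
    tcg_temp_free_i32(val);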
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index 042d442c7e..42d0cfef97 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -27,15 +27,6 @@
*/
/* predefined ops */
-DEF(end, 0, 0, 0, TCG_OPF_NOT_PRESENT) /* must be kept first */
-DEF(nop, 0, 0, 0, TCG_OPF_NOT_PRESENT)
-DEF(nop1, 0, 0, 1, TCG_OPF_NOT_PRESENT)
-DEF(nop2, 0, 0, 2, TCG_OPF_NOT_PRESENT)
-DEF(nop3, 0, 0, 3, TCG_OPF_NOT_PRESENT)
-
-/* variable number of parameters */
-DEF(nopn, 0, 0, 1, TCG_OPF_NOT_PRESENT)
-
DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT)
DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
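
For reference, the DEF() arguments are (name, output args, input args, constant args, flags). The nop family above can be dropped because dead ops are now unlinked from the op list (see tcg_op_remove below) rather than overwritten with nops; an annotated example of a surviving entry:

    DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT)   /* 1 output, 0 inputs, 0 const args */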
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 6ff8b51198..3841e9951c 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -407,7 +407,6 @@ void tcg_func_start(TCGContext *s)
/* No temps have been previously allocated for size or locality. */
memset(s->free_temps, 0, sizeof(s->free_temps));
- s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
s->nb_labels = 0;
s->current_frame_offset = s->frame_start;
@@ -415,8 +414,10 @@ void tcg_func_start(TCGContext *s)
s->goto_tb_issue_mask = 0;
#endif
- s->gen_opc_ptr = s->gen_opc_buf;
- s->gen_opparam_ptr = s->gen_opparam_buf;
+ s->gen_first_op_idx = 0;
+ s->gen_last_op_idx = -1;
+ s->gen_next_op_idx = 0;
+ s->gen_next_parm_idx = 0;
s->be = tcg_malloc(sizeof(TCGBackendData));
}
@@ -703,9 +704,8 @@ int tcg_check_temp_count(void)
void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
int nargs, TCGArg *args)
{
- int i, real_args, nb_rets;
+ int i, real_args, nb_rets, pi, pi_first;
unsigned sizemask, flags;
- TCGArg *nparam;
TCGHelperInfo *info;
info = g_hash_table_lookup(s->helpers, (gpointer)func);
@@ -758,8 +758,7 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
}
#endif /* TCG_TARGET_EXTEND_ARGS */
- *s->gen_opc_ptr++ = INDEX_op_call;
- nparam = s->gen_opparam_ptr++;
+ pi_first = pi = s->gen_next_parm_idx;
if (ret != TCG_CALL_DUMMY_ARG) {
#if defined(__sparc__) && !defined(__arch64__) \
&& !defined(CONFIG_TCG_INTERPRETER)
@@ -769,25 +768,25 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
two return temporaries, and reassemble below. */
retl = tcg_temp_new_i64();
reth = tcg_temp_new_i64();
- *s->gen_opparam_ptr++ = GET_TCGV_I64(reth);
- *s->gen_opparam_ptr++ = GET_TCGV_I64(retl);
+ s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
+ s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
nb_rets = 2;
} else {
- *s->gen_opparam_ptr++ = ret;
+ s->gen_opparam_buf[pi++] = ret;
nb_rets = 1;
}
#else
if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
- *s->gen_opparam_ptr++ = ret + 1;
- *s->gen_opparam_ptr++ = ret;
+ s->gen_opparam_buf[pi++] = ret + 1;
+ s->gen_opparam_buf[pi++] = ret;
#else
- *s->gen_opparam_ptr++ = ret;
- *s->gen_opparam_ptr++ = ret + 1;
+ s->gen_opparam_buf[pi++] = ret;
+ s->gen_opparam_buf[pi++] = ret + 1;
#endif
nb_rets = 2;
} else {
- *s->gen_opparam_ptr++ = ret;
+ s->gen_opparam_buf[pi++] = ret;
nb_rets = 1;
}
#endif
@@ -801,7 +800,7 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
if (real_args & 1) {
- *s->gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
+ s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
real_args++;
}
#endif
@@ -816,26 +815,42 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
have to get more complicated to differentiate between
stack arguments and register arguments. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
- *s->gen_opparam_ptr++ = args[i] + 1;
- *s->gen_opparam_ptr++ = args[i];
+ s->gen_opparam_buf[pi++] = args[i] + 1;
+ s->gen_opparam_buf[pi++] = args[i];
#else
- *s->gen_opparam_ptr++ = args[i];
- *s->gen_opparam_ptr++ = args[i] + 1;
+ s->gen_opparam_buf[pi++] = args[i];
+ s->gen_opparam_buf[pi++] = args[i] + 1;
#endif
real_args += 2;
continue;
}
- *s->gen_opparam_ptr++ = args[i];
+ s->gen_opparam_buf[pi++] = args[i];
real_args++;
}
- *s->gen_opparam_ptr++ = (uintptr_t)func;
- *s->gen_opparam_ptr++ = flags;
+ s->gen_opparam_buf[pi++] = (uintptr_t)func;
+ s->gen_opparam_buf[pi++] = flags;
+
+ i = s->gen_next_op_idx;
+ tcg_debug_assert(i < OPC_BUF_SIZE);
+ tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);
+
+ /* Set links for sequential allocation during translation. */
+ s->gen_op_buf[i] = (TCGOp){
+ .opc = INDEX_op_call,
+ .callo = nb_rets,
+ .calli = real_args,
+ .args = pi_first,
+ .prev = i - 1,
+ .next = i + 1
+ };
- *nparam = (nb_rets << 16) | real_args;
+ /* Make sure the calli field didn't overflow. */
+ tcg_debug_assert(s->gen_op_buf[i].calli == real_args);
- /* total parameters, needed to go backward in the instruction stream */
- *s->gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;
+ s->gen_last_op_idx = i;
+ s->gen_next_op_idx = i + 1;
+ s->gen_next_parm_idx = pi;
#if defined(__sparc__) && !defined(__arch64__) \
&& !defined(CONFIG_TCG_INTERPRETER)
@@ -870,143 +885,6 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
#endif /* TCG_TARGET_EXTEND_ARGS */
}
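
To make the new emission scheme concrete, a minimal sketch of how a fixed-arity op would be appended to the same structures (the real emitters live outside this hunk; ret_arg and src_arg stand in for actual TCGArg values):

    int oi = s->gen_next_op_idx;
    int pi = s->gen_next_parm_idx;

    s->gen_opparam_buf[pi + 0] = ret_arg;
    s->gen_opparam_buf[pi + 1] = src_arg;

    s->gen_op_buf[oi] = (TCGOp){
        .opc  = INDEX_op_mov_i32,
        .args = pi,
        .prev = oi - 1,
        .next = oi + 1,
    };

    s->gen_last_op_idx = oi;
    s->gen_next_op_idx = oi + 1;
    s->gen_next_parm_idx = pi + 2;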
-#if TCG_TARGET_REG_BITS == 32
-void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
- int c, int right, int arith)
-{
- if (c == 0) {
- tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
- tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
- } else if (c >= 32) {
- c -= 32;
- if (right) {
- if (arith) {
- tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
- } else {
- tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
- }
- } else {
- tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
- tcg_gen_movi_i32(TCGV_LOW(ret), 0);
- }
- } else {
- TCGv_i32 t0, t1;
-
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- if (right) {
- tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
- if (arith)
- tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
- else
- tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
- tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
- tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
- tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
- } else {
- tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
- /* Note: ret can be the same as arg1, so we use t1 */
- tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
- tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
- tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
- tcg_gen_mov_i32(TCGV_LOW(ret), t1);
- }
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- }
-}
-#endif
-
-static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
-{
- switch (op & MO_SIZE) {
- case MO_8:
- op &= ~MO_BSWAP;
- break;
- case MO_16:
- break;
- case MO_32:
- if (!is64) {
- op &= ~MO_SIGN;
- }
- break;
- case MO_64:
- if (!is64) {
- tcg_abort();
- }
- break;
- }
- if (st) {
- op &= ~MO_SIGN;
- }
- return op;
-}
-
-void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
-{
- memop = tcg_canonicalize_memop(memop, 0, 0);
-
- *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
- tcg_add_param_i32(val);
- tcg_add_param_tl(addr);
- *tcg_ctx.gen_opparam_ptr++ = memop;
- *tcg_ctx.gen_opparam_ptr++ = idx;
-}
-
-void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
-{
- memop = tcg_canonicalize_memop(memop, 0, 1);
-
- *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
- tcg_add_param_i32(val);
- tcg_add_param_tl(addr);
- *tcg_ctx.gen_opparam_ptr++ = memop;
- *tcg_ctx.gen_opparam_ptr++ = idx;
-}
-
-void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
-{
- memop = tcg_canonicalize_memop(memop, 1, 0);
-
-#if TCG_TARGET_REG_BITS == 32
- if ((memop & MO_SIZE) < MO_64) {
- tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
- if (memop & MO_SIGN) {
- tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
- } else {
- tcg_gen_movi_i32(TCGV_HIGH(val), 0);
- }
- return;
- }
-#endif
-
- *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
- tcg_add_param_i64(val);
- tcg_add_param_tl(addr);
- *tcg_ctx.gen_opparam_ptr++ = memop;
- *tcg_ctx.gen_opparam_ptr++ = idx;
-}
-
-void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
-{
- memop = tcg_canonicalize_memop(memop, 1, 1);
-
-#if TCG_TARGET_REG_BITS == 32
- if ((memop & MO_SIZE) < MO_64) {
- tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
- return;
- }
-#endif
-
- *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
- tcg_add_param_i64(val);
- tcg_add_param_tl(addr);
- *tcg_ctx.gen_opparam_ptr++ = memop;
- *tcg_ctx.gen_opparam_ptr++ = idx;
-}
-
static void tcg_reg_alloc_start(TCGContext *s)
{
int i;
@@ -1109,20 +987,21 @@ static const char * const ldst_name[] =
void tcg_dump_ops(TCGContext *s)
{
- const uint16_t *opc_ptr;
- const TCGArg *args;
- TCGArg arg;
- TCGOpcode c;
- int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
- const TCGOpDef *def;
char buf[128];
+ TCGOp *op;
+ int oi;
- first_insn = 1;
- opc_ptr = s->gen_opc_buf;
- args = s->gen_opparam_buf;
- while (opc_ptr < s->gen_opc_ptr) {
- c = *opc_ptr++;
+ for (oi = s->gen_first_op_idx; oi >= 0; oi = op->next) {
+ int i, k, nb_oargs, nb_iargs, nb_cargs;
+ const TCGOpDef *def;
+ const TCGArg *args;
+ TCGOpcode c;
+
+ op = &s->gen_op_buf[oi];
+ c = op->opc;
def = &tcg_op_defs[c];
+ args = &s->gen_opparam_buf[op->args];
+
if (c == INDEX_op_debug_insn_start) {
uint64_t pc;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
@@ -1130,21 +1009,14 @@ void tcg_dump_ops(TCGContext *s)
#else
pc = args[0];
#endif
- if (!first_insn) {
+ if (oi != s->gen_first_op_idx) {
qemu_log("\n");
}
qemu_log(" ---- 0x%" PRIx64, pc);
- first_insn = 0;
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
- nb_cargs = def->nb_cargs;
} else if (c == INDEX_op_call) {
- TCGArg arg;
-
/* variable number of arguments */
- arg = *args++;
- nb_oargs = arg >> 16;
- nb_iargs = arg & 0xffff;
+ nb_oargs = op->callo;
+ nb_iargs = op->calli;
nb_cargs = def->nb_cargs;
/* function name, flags, out args */
@@ -1165,26 +1037,20 @@ void tcg_dump_ops(TCGContext *s)
}
} else {
qemu_log(" %s ", def->name);
- if (c == INDEX_op_nopn) {
- /* variable number of arguments */
- nb_cargs = *args;
- nb_oargs = 0;
- nb_iargs = 0;
- } else {
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
- nb_cargs = def->nb_cargs;
- }
-
+
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
+ nb_cargs = def->nb_cargs;
+
k = 0;
- for(i = 0; i < nb_oargs; i++) {
+ for (i = 0; i < nb_oargs; i++) {
if (k != 0) {
qemu_log(",");
}
qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
args[k++]));
}
- for(i = 0; i < nb_iargs; i++) {
+ for (i = 0; i < nb_iargs; i++) {
if (k != 0) {
qemu_log(",");
}
@@ -1222,16 +1088,14 @@ void tcg_dump_ops(TCGContext *s)
i = 0;
break;
}
- for(; i < nb_cargs; i++) {
+ for (; i < nb_cargs; i++) {
if (k != 0) {
qemu_log(",");
}
- arg = args[k++];
- qemu_log("$0x%" TCG_PRIlx, arg);
+ qemu_log("$0x%" TCG_PRIlx, args[k++]);
}
}
qemu_log("\n");
- args += nb_iargs + nb_oargs + nb_cargs;
}
}
@@ -1380,21 +1244,30 @@ void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
#endif
}
-#ifdef USE_LIVENESS_ANALYSIS
-
-/* set a nop for an operation using 'nb_args' */
-static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
- TCGArg *args, int nb_args)
+void tcg_op_remove(TCGContext *s, TCGOp *op)
{
- if (nb_args == 0) {
- *opc_ptr = INDEX_op_nop;
+ int next = op->next;
+ int prev = op->prev;
+
+ if (next >= 0) {
+ s->gen_op_buf[next].prev = prev;
+ } else {
+ s->gen_last_op_idx = prev;
+ }
+ if (prev >= 0) {
+ s->gen_op_buf[prev].next = next;
} else {
- *opc_ptr = INDEX_op_nopn;
- args[0] = nb_args;
- args[nb_args - 1] = nb_args;
+ s->gen_first_op_idx = next;
}
+
+ memset(op, -1, sizeof(*op));
+
+#ifdef CONFIG_PROFILER
+ s->del_op_count++;
+#endif
}
+#ifdef USE_LIVENESS_ANALYSIS
/* liveness analysis: end of function: all temps are dead, and globals
should be in memory. */
static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
@@ -1424,19 +1297,10 @@ static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
temporaries are removed. */
static void tcg_liveness_analysis(TCGContext *s)
{
- int i, op_index, nb_args, nb_iargs, nb_oargs, nb_ops;
- TCGOpcode op, op_new, op_new2;
- TCGArg *args, arg;
- const TCGOpDef *def;
uint8_t *dead_temps, *mem_temps;
- uint16_t dead_args;
- uint8_t sync_args;
- bool have_op_new2;
-
- s->gen_opc_ptr++; /* skip end */
-
- nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
+ int oi, oi_prev, nb_ops;
+ nb_ops = s->gen_next_op_idx;
s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
@@ -1444,25 +1308,31 @@ static void tcg_liveness_analysis(TCGContext *s)
mem_temps = tcg_malloc(s->nb_temps);
tcg_la_func_end(s, dead_temps, mem_temps);
- args = s->gen_opparam_ptr;
- op_index = nb_ops - 1;
- while (op_index >= 0) {
- op = s->gen_opc_buf[op_index];
- def = &tcg_op_defs[op];
- switch(op) {
+ for (oi = s->gen_last_op_idx; oi >= 0; oi = oi_prev) {
+ int i, nb_iargs, nb_oargs;
+ TCGOpcode opc_new, opc_new2;
+ bool have_opc_new2;
+ uint16_t dead_args;
+ uint8_t sync_args;
+ TCGArg arg;
+
+ TCGOp * const op = &s->gen_op_buf[oi];
+ TCGArg * const args = &s->gen_opparam_buf[op->args];
+ TCGOpcode opc = op->opc;
+ const TCGOpDef *def = &tcg_op_defs[opc];
+
+ oi_prev = op->prev;
+
+ switch (opc) {
case INDEX_op_call:
{
int call_flags;
- nb_args = args[-1];
- args -= nb_args;
- arg = *args++;
- nb_iargs = arg & 0xffff;
- nb_oargs = arg >> 16;
+ nb_oargs = op->callo;
+ nb_iargs = op->calli;
call_flags = args[nb_oargs + nb_iargs + 1];
- /* pure functions can be removed if their result is not
- used */
+ /* pure functions can be removed if their result is unused */
if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
for (i = 0; i < nb_oargs; i++) {
arg = args[i];
@@ -1470,8 +1340,7 @@ static void tcg_liveness_analysis(TCGContext *s)
goto do_not_remove_call;
}
}
- tcg_set_nop(s, s->gen_opc_buf + op_index,
- args - 1, nb_args);
+ goto do_remove;
} else {
do_not_remove_call:
@@ -1510,41 +1379,31 @@ static void tcg_liveness_analysis(TCGContext *s)
dead_temps[arg] = 0;
}
}
- s->op_dead_args[op_index] = dead_args;
- s->op_sync_args[op_index] = sync_args;
+ s->op_dead_args[oi] = dead_args;
+ s->op_sync_args[oi] = sync_args;
}
- args--;
}
break;
case INDEX_op_debug_insn_start:
- args -= def->nb_args;
- break;
- case INDEX_op_nopn:
- nb_args = args[-1];
- args -= nb_args;
break;
case INDEX_op_discard:
- args--;
/* mark the temporary as dead */
dead_temps[args[0]] = 1;
mem_temps[args[0]] = 0;
break;
- case INDEX_op_end:
- break;
case INDEX_op_add2_i32:
- op_new = INDEX_op_add_i32;
+ opc_new = INDEX_op_add_i32;
goto do_addsub2;
case INDEX_op_sub2_i32:
- op_new = INDEX_op_sub_i32;
+ opc_new = INDEX_op_sub_i32;
goto do_addsub2;
case INDEX_op_add2_i64:
- op_new = INDEX_op_add_i64;
+ opc_new = INDEX_op_add_i64;
goto do_addsub2;
case INDEX_op_sub2_i64:
- op_new = INDEX_op_sub_i64;
+ opc_new = INDEX_op_sub_i64;
do_addsub2:
- args -= 6;
nb_iargs = 4;
nb_oargs = 2;
/* Test if the high part of the operation is dead, but not
@@ -1555,12 +1414,11 @@ static void tcg_liveness_analysis(TCGContext *s)
if (dead_temps[args[0]] && !mem_temps[args[0]]) {
goto do_remove;
}
- /* Create the single operation plus nop. */
- s->gen_opc_buf[op_index] = op = op_new;
+ /* Replace the opcode and adjust the args in place,
+ leaving 3 unused args at the end. */
+ op->opc = opc = opc_new;
args[1] = args[2];
args[2] = args[4];
- assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
- tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 3);
/* Fall through and mark the single-word operation live. */
nb_iargs = 2;
nb_oargs = 1;
@@ -1568,27 +1426,26 @@ static void tcg_liveness_analysis(TCGContext *s)
goto do_not_remove;
case INDEX_op_mulu2_i32:
- op_new = INDEX_op_mul_i32;
- op_new2 = INDEX_op_muluh_i32;
- have_op_new2 = TCG_TARGET_HAS_muluh_i32;
+ opc_new = INDEX_op_mul_i32;
+ opc_new2 = INDEX_op_muluh_i32;
+ have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
goto do_mul2;
case INDEX_op_muls2_i32:
- op_new = INDEX_op_mul_i32;
- op_new2 = INDEX_op_mulsh_i32;
- have_op_new2 = TCG_TARGET_HAS_mulsh_i32;
+ opc_new = INDEX_op_mul_i32;
+ opc_new2 = INDEX_op_mulsh_i32;
+ have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
goto do_mul2;
case INDEX_op_mulu2_i64:
- op_new = INDEX_op_mul_i64;
- op_new2 = INDEX_op_muluh_i64;
- have_op_new2 = TCG_TARGET_HAS_muluh_i64;
+ opc_new = INDEX_op_mul_i64;
+ opc_new2 = INDEX_op_muluh_i64;
+ have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
goto do_mul2;
case INDEX_op_muls2_i64:
- op_new = INDEX_op_mul_i64;
- op_new2 = INDEX_op_mulsh_i64;
- have_op_new2 = TCG_TARGET_HAS_mulsh_i64;
+ opc_new = INDEX_op_mul_i64;
+ opc_new2 = INDEX_op_mulsh_i64;
+ have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
goto do_mul2;
do_mul2:
- args -= 4;
nb_iargs = 2;
nb_oargs = 2;
if (dead_temps[args[1]] && !mem_temps[args[1]]) {
@@ -1597,28 +1454,25 @@ static void tcg_liveness_analysis(TCGContext *s)
goto do_remove;
}
/* The high part of the operation is dead; generate the low. */
- s->gen_opc_buf[op_index] = op = op_new;
+ op->opc = opc = opc_new;
args[1] = args[2];
args[2] = args[3];
- } else if (have_op_new2 && dead_temps[args[0]]
+ } else if (have_opc_new2 && dead_temps[args[0]]
&& !mem_temps[args[0]]) {
- /* The low part of the operation is dead; generate the high. */
- s->gen_opc_buf[op_index] = op = op_new2;
+ /* The low part of the operation is dead; generate the high. */
+ op->opc = opc = opc_new2;
args[0] = args[1];
args[1] = args[2];
args[2] = args[3];
} else {
goto do_not_remove;
}
- assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
- tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1);
/* Mark the single-word operation live. */
nb_oargs = 1;
goto do_not_remove;
default:
/* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
- args -= def->nb_args;
nb_iargs = def->nb_iargs;
nb_oargs = def->nb_oargs;
@@ -1626,24 +1480,20 @@ static void tcg_liveness_analysis(TCGContext *s)
its outputs are dead. We assume that nb_oargs == 0
implies side effects */
if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
- for(i = 0; i < nb_oargs; i++) {
+ for (i = 0; i < nb_oargs; i++) {
arg = args[i];
if (!dead_temps[arg] || mem_temps[arg]) {
goto do_not_remove;
}
}
do_remove:
- tcg_set_nop(s, s->gen_opc_buf + op_index, args, def->nb_args);
-#ifdef CONFIG_PROFILER
- s->del_op_count++;
-#endif
+ tcg_op_remove(s, op);
} else {
do_not_remove:
-
/* output args are dead */
dead_args = 0;
sync_args = 0;
- for(i = 0; i < nb_oargs; i++) {
+ for (i = 0; i < nb_oargs; i++) {
arg = args[i];
if (dead_temps[arg]) {
dead_args |= (1 << i);
@@ -1664,23 +1514,18 @@ static void tcg_liveness_analysis(TCGContext *s)
}
/* input args are live */
- for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
arg = args[i];
if (dead_temps[arg]) {
dead_args |= (1 << i);
}
dead_temps[arg] = 0;
}
- s->op_dead_args[op_index] = dead_args;
- s->op_sync_args[op_index] = sync_args;
+ s->op_dead_args[oi] = dead_args;
+ s->op_sync_args[oi] = sync_args;
}
break;
}
- op_index--;
- }
-
- if (args != s->gen_opparam_buf) {
- tcg_abort();
}
}
#else
@@ -2247,11 +2092,11 @@ static void tcg_reg_alloc_op(TCGContext *s,
#define STACK_DIR(x) (x)
#endif
-static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
- TCGOpcode opc, const TCGArg *args,
- uint16_t dead_args, uint8_t sync_args)
+static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
+ const TCGArg * const args, uint16_t dead_args,
+ uint8_t sync_args)
{
- int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
+ int flags, nb_regs, i, reg;
TCGArg arg;
TCGTemp *ts;
intptr_t stack_offset;
@@ -2260,22 +2105,16 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
int allocate_args;
TCGRegSet allocated_regs;
- arg = *args++;
-
- nb_oargs = arg >> 16;
- nb_iargs = arg & 0xffff;
- nb_params = nb_iargs;
-
func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
flags = args[nb_oargs + nb_iargs + 1];
nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
- if (nb_regs > nb_params) {
- nb_regs = nb_params;
+ if (nb_regs > nb_iargs) {
+ nb_regs = nb_iargs;
}
/* assign stack slots first */
- call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
+ call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
~(TCG_TARGET_STACK_ALIGN - 1);
allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
@@ -2286,7 +2125,7 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
}
stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
- for(i = nb_regs; i < nb_params; i++) {
+ for(i = nb_regs; i < nb_iargs; i++) {
arg = args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
stack_offset -= sizeof(tcg_target_long);
@@ -2393,8 +2232,6 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
}
}
}
-
- return nb_iargs + nb_oargs + def->nb_cargs + 1;
}
#ifdef CONFIG_PROFILER
@@ -2405,7 +2242,7 @@ void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
int i;
- for(i = INDEX_op_end; i < NB_OPS; i++) {
+ for (i = 0; i < NB_OPS; i++) {
cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
tcg_table_op_count[i]);
}
@@ -2422,10 +2259,7 @@ static inline int tcg_gen_code_common(TCGContext *s,
tcg_insn_unit *gen_code_buf,
long search_pc)
{
- TCGOpcode opc;
- int op_index;
- const TCGOpDef *def;
- const TCGArg *args;
+ int oi, oi_next;
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
@@ -2440,8 +2274,7 @@ static inline int tcg_gen_code_common(TCGContext *s,
#endif
#ifdef USE_TCG_OPTIMIZATIONS
- s->gen_opparam_ptr =
- tcg_optimize(s, s->gen_opc_ptr, s->gen_opparam_buf, tcg_op_defs);
+ tcg_optimize(s);
#endif
#ifdef CONFIG_PROFILER
@@ -2470,42 +2303,30 @@ static inline int tcg_gen_code_common(TCGContext *s,
tcg_out_tb_init(s);
- args = s->gen_opparam_buf;
- op_index = 0;
+ for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
+ TCGOp * const op = &s->gen_op_buf[oi];
+ TCGArg * const args = &s->gen_opparam_buf[op->args];
+ TCGOpcode opc = op->opc;
+ const TCGOpDef *def = &tcg_op_defs[opc];
+ uint16_t dead_args = s->op_dead_args[oi];
+ uint8_t sync_args = s->op_sync_args[oi];
- for(;;) {
- opc = s->gen_opc_buf[op_index];
+ oi_next = op->next;
#ifdef CONFIG_PROFILER
tcg_table_op_count[opc]++;
#endif
- def = &tcg_op_defs[opc];
-#if 0
- printf("%s: %d %d %d\n", def->name,
- def->nb_oargs, def->nb_iargs, def->nb_cargs);
- // dump_regs(s);
-#endif
- switch(opc) {
+
+ switch (opc) {
case INDEX_op_mov_i32:
case INDEX_op_mov_i64:
- tcg_reg_alloc_mov(s, def, args, s->op_dead_args[op_index],
- s->op_sync_args[op_index]);
+ tcg_reg_alloc_mov(s, def, args, dead_args, sync_args);
break;
case INDEX_op_movi_i32:
case INDEX_op_movi_i64:
- tcg_reg_alloc_movi(s, args, s->op_dead_args[op_index],
- s->op_sync_args[op_index]);
+ tcg_reg_alloc_movi(s, args, dead_args, sync_args);
break;
case INDEX_op_debug_insn_start:
- /* debug instruction */
break;
- case INDEX_op_nop:
- case INDEX_op_nop1:
- case INDEX_op_nop2:
- case INDEX_op_nop3:
- break;
- case INDEX_op_nopn:
- args += args[0];
- goto next;
case INDEX_op_discard:
temp_dead(s, args[0]);
break;
@@ -2514,12 +2335,9 @@ static inline int tcg_gen_code_common(TCGContext *s,
tcg_out_label(s, args[0], s->code_ptr);
break;
case INDEX_op_call:
- args += tcg_reg_alloc_call(s, def, opc, args,
- s->op_dead_args[op_index],
- s->op_sync_args[op_index]);
- goto next;
- case INDEX_op_end:
- goto the_end;
+ tcg_reg_alloc_call(s, op->callo, op->calli, args,
+ dead_args, sync_args);
+ break;
default:
/* Sanity check that we've not introduced any unhandled opcodes. */
if (def->flags & TCG_OPF_NOT_PRESENT) {
@@ -2528,21 +2346,17 @@ static inline int tcg_gen_code_common(TCGContext *s,
/* Note: in order to speed up the code, it would be much
faster to have specialized register allocator functions for
some common argument patterns */
- tcg_reg_alloc_op(s, def, opc, args, s->op_dead_args[op_index],
- s->op_sync_args[op_index]);
+ tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
break;
}
- args += def->nb_args;
- next:
if (search_pc >= 0 && search_pc < tcg_current_code_size(s)) {
- return op_index;
+ return oi;
}
- op_index++;
#ifndef NDEBUG
check_regs(s);
#endif
}
- the_end:
+
/* Generate TB finalization at the end of block */
tcg_out_tb_finalize(s);
return -1;
@@ -2553,14 +2367,18 @@ int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
#ifdef CONFIG_PROFILER
{
int n;
- n = (s->gen_opc_ptr - s->gen_opc_buf);
+
+ n = s->gen_last_op_idx + 1;
s->op_count += n;
- if (n > s->op_count_max)
+ if (n > s->op_count_max) {
s->op_count_max = n;
+ }
- s->temp_count += s->nb_temps;
- if (s->nb_temps > s->temp_count_max)
- s->temp_count_max = s->nb_temps;
+ n = s->nb_temps;
+ s->temp_count += n;
+ if (n > s->temp_count_max) {
+ s->temp_count_max = n;
+ }
}
#endif
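
The traversal pattern used throughout the rewrites above, as a standalone sketch (forward walk; a backward walk starts at gen_last_op_idx and follows op->prev):

    int oi;
    for (oi = s->gen_first_op_idx; oi >= 0; oi = s->gen_op_buf[oi].next) {
        TCGOp *op = &s->gen_op_buf[oi];
        TCGArg *args = &s->gen_opparam_buf[op->args];
        /* inspect op->opc and args[0..] here */
    }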
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 944b87712a..f941965d53 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -448,10 +448,28 @@ typedef struct TCGTempSet {
unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;
+typedef struct TCGOp {
+ TCGOpcode opc : 8;
+
+ /* The number of out and in parameter for a call. */
+ unsigned callo : 2;
+ unsigned calli : 6;
+
+ /* Index of the arguments for this op, or -1 for zero-operand ops. */
+ signed args : 16;
+
+ /* Index of the prev/next op, or -1 for the end of the list. */
+ signed prev : 16;
+ signed next : 16;
+} TCGOp;
+
+QEMU_BUILD_BUG_ON(NB_OPS > 0xff);
+QEMU_BUILD_BUG_ON(OPC_BUF_SIZE >= 0x7fff);
+QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE >= 0x7fff);
+
struct TCGContext {
uint8_t *pool_cur, *pool_end;
TCGPool *pool_first, *pool_current, *pool_first_large;
- TCGLabel *labels;
int nb_labels;
int nb_globals;
int nb_temps;
@@ -469,9 +487,6 @@ struct TCGContext {
corresponding output argument needs to be
sync to memory. */
- /* tells in which temporary a given register is. It does not take
- into account fixed registers */
- int reg_to_temp[TCG_TARGET_NB_REGS];
TCGRegSet reserved_regs;
intptr_t current_frame_offset;
intptr_t frame_start;
@@ -479,8 +494,6 @@ struct TCGContext {
int frame_reg;
tcg_insn_unit *code_ptr;
- TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
- TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
GHashTable *helpers;
@@ -508,14 +521,10 @@ struct TCGContext {
int goto_tb_issue_mask;
#endif
- uint16_t gen_opc_buf[OPC_BUF_SIZE];
- TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];
-
- uint16_t *gen_opc_ptr;
- TCGArg *gen_opparam_ptr;
- target_ulong gen_opc_pc[OPC_BUF_SIZE];
- uint16_t gen_opc_icount[OPC_BUF_SIZE];
- uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
+ int gen_first_op_idx;
+ int gen_last_op_idx;
+ int gen_next_op_idx;
+ int gen_next_parm_idx;
/* Code generation. Note that we specifically do not use tcg_insn_unit
here, because there's too much arithmetic throughout that relies
@@ -533,10 +542,38 @@ struct TCGContext {
/* The TCGBackendData structure is private to tcg-target.c. */
struct TCGBackendData *be;
+
+ TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
+ TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
+
+ /* tells in which temporary a given register is. It does not take
+ into account fixed registers */
+ int reg_to_temp[TCG_TARGET_NB_REGS];
+
+ TCGOp gen_op_buf[OPC_BUF_SIZE];
+ TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];
+
+ target_ulong gen_opc_pc[OPC_BUF_SIZE];
+ uint16_t gen_opc_icount[OPC_BUF_SIZE];
+ uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
+
+ TCGLabel labels[TCG_MAX_LABELS];
};
extern TCGContext tcg_ctx;
+/* The number of opcodes emitted so far. */
+static inline int tcg_op_buf_count(void)
+{
+ return tcg_ctx.gen_next_op_idx;
+}
+
+/* Test for whether to terminate the TB for using too many opcodes. */
+static inline bool tcg_op_buf_full(void)
+{
+ return tcg_op_buf_count() >= OPC_MAX_SIZE;
+}
+
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size);
@@ -706,11 +743,8 @@ void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
void tcg_gen_callN(TCGContext *s, void *func,
TCGArg ret, int nargs, TCGArg *args);
-void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
- int c, int right, int arith);
-
-TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args,
- TCGOpDef *tcg_op_def);
+void tcg_op_remove(TCGContext *s, TCGOp *op);
+void tcg_optimize(TCGContext *s);
/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);
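
A minimal sketch of how a translator loop can bound TB size with the new helper (the per-target call sites are outside this patch):

    for (;;) {
        /* translate one guest instruction here (target-specific) */
        if (tcg_op_buf_full()) {
            break;   /* op buffer is full enough: finish the TB */
        }
    }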
diff --git a/tci.c b/tci.c
index 4711ee4817..28292b308e 100644
--- a/tci.c
+++ b/tci.c
@@ -506,19 +506,6 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tb_ptr += 2;
switch (opc) {
- case INDEX_op_end:
- case INDEX_op_nop:
- break;
- case INDEX_op_nop1:
- case INDEX_op_nop2:
- case INDEX_op_nop3:
- case INDEX_op_nopn:
- case INDEX_op_discard:
- TODO();
- break;
- case INDEX_op_set_label:
- TODO();
- break;
case INDEX_op_call:
t0 = tci_read_ri(&tb_ptr);
#if TCG_TARGET_REG_BITS == 32
diff --git a/tests/Makefile b/tests/Makefile
index a68ff898de..307035c26c 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -70,6 +70,8 @@ check-unit-y += tests/check-qom-interface$(EXESUF)
gcov-files-check-qom-interface-y = qom/object.c
check-unit-y += tests/test-qemu-opts$(EXESUF)
gcov-files-test-qemu-opts-y = qom/test-qemu-opts.c
+check-unit-y += tests/test-write-threshold$(EXESUF)
+gcov-files-test-write-threshold-y = block/write-threshold.c
check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh
@@ -308,9 +310,10 @@ tests/test-mul64$(EXESUF): tests/test-mul64.o libqemuutil.a
tests/test-bitops$(EXESUF): tests/test-bitops.o libqemuutil.a
libqos-obj-y = tests/libqos/pci.o tests/libqos/fw_cfg.o tests/libqos/malloc.o
-libqos-obj-y += tests/libqos/i2c.o
+libqos-obj-y += tests/libqos/i2c.o tests/libqos/libqos.o
libqos-pc-obj-y = $(libqos-obj-y) tests/libqos/pci-pc.o
-libqos-pc-obj-y += tests/libqos/malloc-pc.o
+libqos-pc-obj-y += tests/libqos/malloc-pc.o tests/libqos/libqos-pc.o
+libqos-pc-obj-y += tests/libqos/ahci.o
libqos-omap-obj-y = $(libqos-obj-y) tests/libqos/i2c-omap.o
libqos-virtio-obj-y = $(libqos-obj-y) $(libqos-pc-obj-y) tests/libqos/virtio.o tests/libqos/virtio-pci.o
libqos-usb-obj-y = $(libqos-pc-obj-y) tests/libqos/usb.o
@@ -363,6 +366,7 @@ tests/usb-hcd-xhci-test$(EXESUF): tests/usb-hcd-xhci-test.o $(libqos-usb-obj-y)
tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o qemu-char.o qemu-timer.o $(qtest-obj-y)
tests/qemu-iotests/socket_scm_helper$(EXESUF): tests/qemu-iotests/socket_scm_helper.o
tests/test-qemu-opts$(EXESUF): tests/test-qemu-opts.o libqemuutil.a libqemustub.a
+tests/test-write-threshold$(EXESUF): tests/test-write-threshold.o $(block-obj-y) libqemuutil.a libqemustub.a
ifeq ($(CONFIG_POSIX),y)
LIBS += -lutil
diff --git a/tests/ahci-test.c b/tests/ahci-test.c
index b1a59f21a7..53fd068c8a 100644
--- a/tests/ahci-test.c
+++ b/tests/ahci-test.c
@@ -29,8 +29,9 @@
#include <glib.h>
#include "libqtest.h"
+#include "libqos/libqos-pc.h"
+#include "libqos/ahci.h"
#include "libqos/pci-pc.h"
-#include "libqos/malloc-pc.h"
#include "qemu-common.h"
#include "qemu/host-utils.h"
@@ -41,361 +42,18 @@
/* Test-specific defines. */
#define TEST_IMAGE_SIZE (64 * 1024 * 1024)
-/*** Supplementary PCI Config Space IDs & Masks ***/
-#define PCI_DEVICE_ID_INTEL_Q35_AHCI (0x2922)
-#define PCI_MSI_FLAGS_RESERVED (0xFF00)
-#define PCI_PM_CTRL_RESERVED (0xFC)
-#define PCI_BCC(REG32) ((REG32) >> 24)
-#define PCI_PI(REG32) (((REG32) >> 8) & 0xFF)
-#define PCI_SCC(REG32) (((REG32) >> 16) & 0xFF)
-
-/*** Recognized AHCI Device Types ***/
-#define AHCI_INTEL_ICH9 (PCI_DEVICE_ID_INTEL_Q35_AHCI << 16 | \
- PCI_VENDOR_ID_INTEL)
-
-/*** AHCI/HBA Register Offsets and Bitmasks ***/
-#define AHCI_CAP (0)
-#define AHCI_CAP_NP (0x1F)
-#define AHCI_CAP_SXS (0x20)
-#define AHCI_CAP_EMS (0x40)
-#define AHCI_CAP_CCCS (0x80)
-#define AHCI_CAP_NCS (0x1F00)
-#define AHCI_CAP_PSC (0x2000)
-#define AHCI_CAP_SSC (0x4000)
-#define AHCI_CAP_PMD (0x8000)
-#define AHCI_CAP_FBSS (0x10000)
-#define AHCI_CAP_SPM (0x20000)
-#define AHCI_CAP_SAM (0x40000)
-#define AHCI_CAP_RESERVED (0x80000)
-#define AHCI_CAP_ISS (0xF00000)
-#define AHCI_CAP_SCLO (0x1000000)
-#define AHCI_CAP_SAL (0x2000000)
-#define AHCI_CAP_SALP (0x4000000)
-#define AHCI_CAP_SSS (0x8000000)
-#define AHCI_CAP_SMPS (0x10000000)
-#define AHCI_CAP_SSNTF (0x20000000)
-#define AHCI_CAP_SNCQ (0x40000000)
-#define AHCI_CAP_S64A (0x80000000)
-
-#define AHCI_GHC (1)
-#define AHCI_GHC_HR (0x01)
-#define AHCI_GHC_IE (0x02)
-#define AHCI_GHC_MRSM (0x04)
-#define AHCI_GHC_RESERVED (0x7FFFFFF8)
-#define AHCI_GHC_AE (0x80000000)
-
-#define AHCI_IS (2)
-#define AHCI_PI (3)
-#define AHCI_VS (4)
-
-#define AHCI_CCCCTL (5)
-#define AHCI_CCCCTL_EN (0x01)
-#define AHCI_CCCCTL_RESERVED (0x06)
-#define AHCI_CCCCTL_CC (0xFF00)
-#define AHCI_CCCCTL_TV (0xFFFF0000)
-
-#define AHCI_CCCPORTS (6)
-#define AHCI_EMLOC (7)
-
-#define AHCI_EMCTL (8)
-#define AHCI_EMCTL_STSMR (0x01)
-#define AHCI_EMCTL_CTLTM (0x100)
-#define AHCI_EMCTL_CTLRST (0x200)
-#define AHCI_EMCTL_RESERVED (0xF0F0FCFE)
-
-#define AHCI_CAP2 (9)
-#define AHCI_CAP2_BOH (0x01)
-#define AHCI_CAP2_NVMP (0x02)
-#define AHCI_CAP2_APST (0x04)
-#define AHCI_CAP2_RESERVED (0xFFFFFFF8)
-
-#define AHCI_BOHC (10)
-#define AHCI_RESERVED (11)
-#define AHCI_NVMHCI (24)
-#define AHCI_VENDOR (40)
-#define AHCI_PORTS (64)
-
-/*** Port Memory Offsets & Bitmasks ***/
-#define AHCI_PX_CLB (0)
-#define AHCI_PX_CLB_RESERVED (0x1FF)
-
-#define AHCI_PX_CLBU (1)
-
-#define AHCI_PX_FB (2)
-#define AHCI_PX_FB_RESERVED (0xFF)
-
-#define AHCI_PX_FBU (3)
-
-#define AHCI_PX_IS (4)
-#define AHCI_PX_IS_DHRS (0x1)
-#define AHCI_PX_IS_PSS (0x2)
-#define AHCI_PX_IS_DSS (0x4)
-#define AHCI_PX_IS_SDBS (0x8)
-#define AHCI_PX_IS_UFS (0x10)
-#define AHCI_PX_IS_DPS (0x20)
-#define AHCI_PX_IS_PCS (0x40)
-#define AHCI_PX_IS_DMPS (0x80)
-#define AHCI_PX_IS_RESERVED (0x23FFF00)
-#define AHCI_PX_IS_PRCS (0x400000)
-#define AHCI_PX_IS_IPMS (0x800000)
-#define AHCI_PX_IS_OFS (0x1000000)
-#define AHCI_PX_IS_INFS (0x4000000)
-#define AHCI_PX_IS_IFS (0x8000000)
-#define AHCI_PX_IS_HBDS (0x10000000)
-#define AHCI_PX_IS_HBFS (0x20000000)
-#define AHCI_PX_IS_TFES (0x40000000)
-#define AHCI_PX_IS_CPDS (0x80000000)
-
-#define AHCI_PX_IE (5)
-#define AHCI_PX_IE_DHRE (0x1)
-#define AHCI_PX_IE_PSE (0x2)
-#define AHCI_PX_IE_DSE (0x4)
-#define AHCI_PX_IE_SDBE (0x8)
-#define AHCI_PX_IE_UFE (0x10)
-#define AHCI_PX_IE_DPE (0x20)
-#define AHCI_PX_IE_PCE (0x40)
-#define AHCI_PX_IE_DMPE (0x80)
-#define AHCI_PX_IE_RESERVED (0x23FFF00)
-#define AHCI_PX_IE_PRCE (0x400000)
-#define AHCI_PX_IE_IPME (0x800000)
-#define AHCI_PX_IE_OFE (0x1000000)
-#define AHCI_PX_IE_INFE (0x4000000)
-#define AHCI_PX_IE_IFE (0x8000000)
-#define AHCI_PX_IE_HBDE (0x10000000)
-#define AHCI_PX_IE_HBFE (0x20000000)
-#define AHCI_PX_IE_TFEE (0x40000000)
-#define AHCI_PX_IE_CPDE (0x80000000)
-
-#define AHCI_PX_CMD (6)
-#define AHCI_PX_CMD_ST (0x1)
-#define AHCI_PX_CMD_SUD (0x2)
-#define AHCI_PX_CMD_POD (0x4)
-#define AHCI_PX_CMD_CLO (0x8)
-#define AHCI_PX_CMD_FRE (0x10)
-#define AHCI_PX_CMD_RESERVED (0xE0)
-#define AHCI_PX_CMD_CCS (0x1F00)
-#define AHCI_PX_CMD_MPSS (0x2000)
-#define AHCI_PX_CMD_FR (0x4000)
-#define AHCI_PX_CMD_CR (0x8000)
-#define AHCI_PX_CMD_CPS (0x10000)
-#define AHCI_PX_CMD_PMA (0x20000)
-#define AHCI_PX_CMD_HPCP (0x40000)
-#define AHCI_PX_CMD_MPSP (0x80000)
-#define AHCI_PX_CMD_CPD (0x100000)
-#define AHCI_PX_CMD_ESP (0x200000)
-#define AHCI_PX_CMD_FBSCP (0x400000)
-#define AHCI_PX_CMD_APSTE (0x800000)
-#define AHCI_PX_CMD_ATAPI (0x1000000)
-#define AHCI_PX_CMD_DLAE (0x2000000)
-#define AHCI_PX_CMD_ALPE (0x4000000)
-#define AHCI_PX_CMD_ASP (0x8000000)
-#define AHCI_PX_CMD_ICC (0xF0000000)
-
-#define AHCI_PX_RES1 (7)
-
-#define AHCI_PX_TFD (8)
-#define AHCI_PX_TFD_STS (0xFF)
-#define AHCI_PX_TFD_STS_ERR (0x01)
-#define AHCI_PX_TFD_STS_CS1 (0x06)
-#define AHCI_PX_TFD_STS_DRQ (0x08)
-#define AHCI_PX_TFD_STS_CS2 (0x70)
-#define AHCI_PX_TFD_STS_BSY (0x80)
-#define AHCI_PX_TFD_ERR (0xFF00)
-#define AHCI_PX_TFD_RESERVED (0xFFFF0000)
-
-#define AHCI_PX_SIG (9)
-#define AHCI_PX_SIG_SECTOR_COUNT (0xFF)
-#define AHCI_PX_SIG_LBA_LOW (0xFF00)
-#define AHCI_PX_SIG_LBA_MID (0xFF0000)
-#define AHCI_PX_SIG_LBA_HIGH (0xFF000000)
-
-#define AHCI_PX_SSTS (10)
-#define AHCI_PX_SSTS_DET (0x0F)
-#define AHCI_PX_SSTS_SPD (0xF0)
-#define AHCI_PX_SSTS_IPM (0xF00)
-#define AHCI_PX_SSTS_RESERVED (0xFFFFF000)
-#define SSTS_DET_NO_DEVICE (0x00)
-#define SSTS_DET_PRESENT (0x01)
-#define SSTS_DET_ESTABLISHED (0x03)
-#define SSTS_DET_OFFLINE (0x04)
-
-#define AHCI_PX_SCTL (11)
-
-#define AHCI_PX_SERR (12)
-#define AHCI_PX_SERR_ERR (0xFFFF)
-#define AHCI_PX_SERR_DIAG (0xFFFF0000)
-#define AHCI_PX_SERR_DIAG_X (0x04000000)
-
-#define AHCI_PX_SACT (13)
-#define AHCI_PX_CI (14)
-#define AHCI_PX_SNTF (15)
-
-#define AHCI_PX_FBS (16)
-#define AHCI_PX_FBS_EN (0x1)
-#define AHCI_PX_FBS_DEC (0x2)
-#define AHCI_PX_FBS_SDE (0x4)
-#define AHCI_PX_FBS_DEV (0xF00)
-#define AHCI_PX_FBS_ADO (0xF000)
-#define AHCI_PX_FBS_DWE (0xF0000)
-#define AHCI_PX_FBS_RESERVED (0xFFF000F8)
-
-#define AHCI_PX_RES2 (17)
-#define AHCI_PX_VS (28)
-
-#define HBA_DATA_REGION_SIZE (256)
-#define HBA_PORT_DATA_SIZE (128)
-#define HBA_PORT_NUM_REG (HBA_PORT_DATA_SIZE/4)
-
-#define AHCI_VERSION_0_95 (0x00000905)
-#define AHCI_VERSION_1_0 (0x00010000)
-#define AHCI_VERSION_1_1 (0x00010100)
-#define AHCI_VERSION_1_2 (0x00010200)
-#define AHCI_VERSION_1_3 (0x00010300)
-
-/*** Structures ***/
-
-/**
- * Generic FIS structure.
- */
-typedef struct FIS {
- uint8_t fis_type;
- uint8_t flags;
- char data[0];
-} __attribute__((__packed__)) FIS;
-
-/**
- * Register device-to-host FIS structure.
- */
-typedef struct RegD2HFIS {
- /* DW0 */
- uint8_t fis_type;
- uint8_t flags;
- uint8_t status;
- uint8_t error;
- /* DW1 */
- uint8_t lba_low;
- uint8_t lba_mid;
- uint8_t lba_high;
- uint8_t device;
- /* DW2 */
- uint8_t lba3;
- uint8_t lba4;
- uint8_t lba5;
- uint8_t res1;
- /* DW3 */
- uint16_t count;
- uint8_t res2;
- uint8_t res3;
- /* DW4 */
- uint16_t res4;
- uint16_t res5;
-} __attribute__((__packed__)) RegD2HFIS;
-
-/**
- * Register host-to-device FIS structure.
- */
-typedef struct RegH2DFIS {
- /* DW0 */
- uint8_t fis_type;
- uint8_t flags;
- uint8_t command;
- uint8_t feature_low;
- /* DW1 */
- uint8_t lba_low;
- uint8_t lba_mid;
- uint8_t lba_high;
- uint8_t device;
- /* DW2 */
- uint8_t lba3;
- uint8_t lba4;
- uint8_t lba5;
- uint8_t feature_high;
- /* DW3 */
- uint16_t count;
- uint8_t icc;
- uint8_t control;
- /* DW4 */
- uint32_t aux;
-} __attribute__((__packed__)) RegH2DFIS;
-
-/**
- * Command List entry structure.
- * The command list contains between 1-32 of these structures.
- */
-typedef struct AHCICommand {
- uint8_t b1;
- uint8_t b2;
- uint16_t prdtl; /* Phys Region Desc. Table Length */
- uint32_t prdbc; /* Phys Region Desc. Byte Count */
- uint32_t ctba; /* Command Table Descriptor Base Address */
- uint32_t ctbau; /* '' Upper */
- uint32_t res[4];
-} __attribute__((__packed__)) AHCICommand;
-
-/**
- * Physical Region Descriptor; pointed to by the Command List Header,
- * struct ahci_command.
- */
-typedef struct PRD {
- uint32_t dba; /* Data Base Address */
- uint32_t dbau; /* Data Base Address Upper */
- uint32_t res; /* Reserved */
- uint32_t dbc; /* Data Byte Count (0-indexed) & Interrupt Flag (bit 2^31) */
-} PRD;
-
-typedef struct HBACap {
- uint32_t cap;
- uint32_t cap2;
-} HBACap;
-
/*** Globals ***/
-static QGuestAllocator *guest_malloc;
-static QPCIBus *pcibus;
-static uint64_t barsize;
static char tmp_path[] = "/tmp/qtest.XXXXXX";
static bool ahci_pedantic;
-static uint32_t ahci_fingerprint;
-
-/*** Macro Utilities ***/
-#define BITANY(data, mask) (((data) & (mask)) != 0)
-#define BITSET(data, mask) (((data) & (mask)) == (mask))
-#define BITCLR(data, mask) (((data) & (mask)) == 0)
-#define ASSERT_BIT_SET(data, mask) g_assert_cmphex((data) & (mask), ==, (mask))
-#define ASSERT_BIT_CLEAR(data, mask) g_assert_cmphex((data) & (mask), ==, 0)
-
-/*** IO macros for the AHCI memory registers. ***/
-#define AHCI_READ(OFST) qpci_io_readl(ahci, hba_base + (OFST))
-#define AHCI_WRITE(OFST, VAL) qpci_io_writel(ahci, hba_base + (OFST), (VAL))
-#define AHCI_RREG(regno) AHCI_READ(4 * (regno))
-#define AHCI_WREG(regno, val) AHCI_WRITE(4 * (regno), (val))
-#define AHCI_SET(regno, mask) AHCI_WREG((regno), AHCI_RREG(regno) | (mask))
-#define AHCI_CLR(regno, mask) AHCI_WREG((regno), AHCI_RREG(regno) & ~(mask))
-
-/*** IO macros for port-specific offsets inside of AHCI memory. ***/
-#define PX_OFST(port, regno) (HBA_PORT_NUM_REG * (port) + AHCI_PORTS + (regno))
-#define PX_RREG(port, regno) AHCI_RREG(PX_OFST((port), (regno)))
-#define PX_WREG(port, regno, val) AHCI_WREG(PX_OFST((port), (regno)), (val))
-#define PX_SET(port, reg, mask) PX_WREG((port), (reg), \
- PX_RREG((port), (reg)) | (mask));
-#define PX_CLR(port, reg, mask) PX_WREG((port), (reg), \
- PX_RREG((port), (reg)) & ~(mask));
-
-/* For calculating how big the PRD table needs to be: */
-#define CMD_TBL_SIZ(n) ((0x80 + ((n) * sizeof(PRD)) + 0x7F) & ~0x7F)
-
/*** Function Declarations ***/
-static QPCIDevice *get_ahci_device(void);
-static QPCIDevice *start_ahci_device(QPCIDevice *dev, void **hba_base);
-static void free_ahci_device(QPCIDevice *dev);
-static void ahci_test_port_spec(QPCIDevice *ahci, void *hba_base,
- HBACap *hcap, uint8_t port);
-static void ahci_test_pci_spec(QPCIDevice *ahci);
-static void ahci_test_pci_caps(QPCIDevice *ahci, uint16_t header,
+static void ahci_test_port_spec(AHCIQState *ahci, uint8_t port);
+static void ahci_test_pci_spec(AHCIQState *ahci);
+static void ahci_test_pci_caps(AHCIQState *ahci, uint16_t header,
uint8_t offset);
-static void ahci_test_satacap(QPCIDevice *ahci, uint8_t offset);
-static void ahci_test_msicap(QPCIDevice *ahci, uint8_t offset);
-static void ahci_test_pmcap(QPCIDevice *ahci, uint8_t offset);
+static void ahci_test_satacap(AHCIQState *ahci, uint8_t offset);
+static void ahci_test_msicap(AHCIQState *ahci, uint8_t offset);
+static void ahci_test_pmcap(AHCIQState *ahci, uint8_t offset);
/*** Utilities ***/
@@ -410,274 +68,43 @@ static void string_bswap16(uint16_t *s, size_t bytes)
}
}
-/**
- * Locate, verify, and return a handle to the AHCI device.
- */
-static QPCIDevice *get_ahci_device(void)
-{
- QPCIDevice *ahci;
-
- pcibus = qpci_init_pc();
-
- /* Find the AHCI PCI device and verify it's the right one. */
- ahci = qpci_device_find(pcibus, QPCI_DEVFN(0x1F, 0x02));
- g_assert(ahci != NULL);
-
- ahci_fingerprint = qpci_config_readl(ahci, PCI_VENDOR_ID);
-
- switch (ahci_fingerprint) {
- case AHCI_INTEL_ICH9:
- break;
- default:
- /* Unknown device. */
- g_assert_not_reached();
- }
-
- return ahci;
-}
-
-static void free_ahci_device(QPCIDevice *ahci)
-{
- /* libqos doesn't have a function for this, so free it manually */
- g_free(ahci);
-
- if (pcibus) {
- qpci_free_pc(pcibus);
- pcibus = NULL;
- }
-
- /* Clear our cached barsize information. */
- barsize = 0;
-}
-
/*** Test Setup & Teardown ***/
/**
- * Launch QEMU with the given command line,
- * and then set up interrupts and our guest malloc interface.
- */
-static void qtest_boot(const char *cmdline_fmt, ...)
-{
- va_list ap;
- char *cmdline;
-
- va_start(ap, cmdline_fmt);
- cmdline = g_strdup_vprintf(cmdline_fmt, ap);
- va_end(ap);
-
- qtest_start(cmdline);
- qtest_irq_intercept_in(global_qtest, "ioapic");
- guest_malloc = pc_alloc_init();
-
- g_free(cmdline);
-}
-
-/**
- * Tear down the QEMU instance.
- */
-static void qtest_shutdown(void)
-{
- g_free(guest_malloc);
- guest_malloc = NULL;
- qtest_end();
-}
-
-/**
* Start a Q35 machine and bookmark a handle to the AHCI device.
*/
-static QPCIDevice *ahci_boot(void)
-{
- qtest_boot("-drive if=none,id=drive0,file=%s,cache=writeback,serial=%s,"
- "format=raw"
- " -M q35 "
- "-device ide-hd,drive=drive0 "
- "-global ide-hd.ver=%s",
- tmp_path, "testdisk", "version");
-
- /* Verify that we have an AHCI device present. */
- return get_ahci_device();
-}
-
-/**
- * Clean up the PCI device, then terminate the QEMU instance.
- */
-static void ahci_shutdown(QPCIDevice *ahci)
-{
- free_ahci_device(ahci);
- qtest_shutdown();
-}
-
-/*** Logical Device Initialization ***/
-
-/**
- * Start the PCI device and sanity-check default operation.
- */
-static void ahci_pci_enable(QPCIDevice *ahci, void **hba_base)
+static AHCIQState *ahci_boot(void)
{
- uint8_t reg;
+ AHCIQState *s;
+ const char *cli;
- start_ahci_device(ahci, hba_base);
+ s = g_malloc0(sizeof(AHCIQState));
- switch (ahci_fingerprint) {
- case AHCI_INTEL_ICH9:
- /* ICH9 has a register at PCI 0x92 that
- * acts as a master port enabler mask. */
- reg = qpci_config_readb(ahci, 0x92);
- reg |= 0x3F;
- qpci_config_writeb(ahci, 0x92, reg);
- /* 0...0111111b -- bit significant, ports 0-5 enabled. */
- ASSERT_BIT_SET(qpci_config_readb(ahci, 0x92), 0x3F);
- break;
- }
-
-}
+ cli = "-drive if=none,id=drive0,file=%s,cache=writeback,serial=%s"
+ ",format=raw"
+ " -M q35 "
+ "-device ide-hd,drive=drive0 "
+ "-global ide-hd.ver=%s";
+ s->parent = qtest_pc_boot(cli, tmp_path, "testdisk", "version");
+ alloc_set_flags(s->parent->alloc, ALLOC_LEAK_ASSERT);
-/**
- * Map BAR5/ABAR, and engage the PCI device.
- */
-static QPCIDevice *start_ahci_device(QPCIDevice *ahci, void **hba_base)
-{
- /* Map AHCI's ABAR (BAR5) */
- *hba_base = qpci_iomap(ahci, 5, &barsize);
-
- /* turns on pci.cmd.iose, pci.cmd.mse and pci.cmd.bme */
- qpci_device_enable(ahci);
+ /* Verify that we have an AHCI device present. */
+ s->dev = get_ahci_device(&s->fingerprint);
- return ahci;
+ return s;
}
/**
- * Test and initialize the AHCI's HBA memory areas.
- * Initialize and start any ports with devices attached.
- * Bring the HBA into the idle state.
+ * Clean up the PCI device, then terminate the QEMU instance.
*/
-static void ahci_hba_enable(QPCIDevice *ahci, void *hba_base)
+static void ahci_shutdown(AHCIQState *ahci)
{
- /* Bits of interest in this section:
- * GHC.AE Global Host Control / AHCI Enable
- * PxCMD.ST Port Command: Start
- * PxCMD.SUD "Spin Up Device"
- * PxCMD.POD "Power On Device"
- * PxCMD.FRE "FIS Receive Enable"
- * PxCMD.FR "FIS Receive Running"
- * PxCMD.CR "Command List Running"
- */
-
- g_assert(ahci != NULL);
- g_assert(hba_base != NULL);
-
- uint32_t reg, ports_impl, clb, fb;
- uint16_t i;
- uint8_t num_cmd_slots;
-
- g_assert(hba_base != 0);
-
- /* Set GHC.AE to 1 */
- AHCI_SET(AHCI_GHC, AHCI_GHC_AE);
- reg = AHCI_RREG(AHCI_GHC);
- ASSERT_BIT_SET(reg, AHCI_GHC_AE);
-
- /* Read CAP.NCS, how many command slots do we have? */
- reg = AHCI_RREG(AHCI_CAP);
- num_cmd_slots = ((reg & AHCI_CAP_NCS) >> ctzl(AHCI_CAP_NCS)) + 1;
- g_test_message("Number of Command Slots: %u", num_cmd_slots);
-
- /* Determine which ports are implemented. */
- ports_impl = AHCI_RREG(AHCI_PI);
-
- for (i = 0; ports_impl; ports_impl >>= 1, ++i) {
- if (!(ports_impl & 0x01)) {
- continue;
- }
-
- g_test_message("Initializing port %u", i);
-
- reg = PX_RREG(i, AHCI_PX_CMD);
- if (BITCLR(reg, AHCI_PX_CMD_ST | AHCI_PX_CMD_CR |
- AHCI_PX_CMD_FRE | AHCI_PX_CMD_FR)) {
- g_test_message("port is idle");
- } else {
- g_test_message("port needs to be idled");
- PX_CLR(i, AHCI_PX_CMD, (AHCI_PX_CMD_ST | AHCI_PX_CMD_FRE));
- /* The port has 500ms to disengage. */
- usleep(500000);
- reg = PX_RREG(i, AHCI_PX_CMD);
- ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_CR);
- ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_FR);
- g_test_message("port is now idle");
- /* The spec does allow for possibly needing a PORT RESET
- * or HBA reset if we fail to idle the port. */
- }
-
- /* Allocate Memory for the Command List Buffer & FIS Buffer */
- /* PxCLB space ... 0x20 per command, as in 4.2.2 p 36 */
- clb = guest_alloc(guest_malloc, num_cmd_slots * 0x20);
- g_test_message("CLB: 0x%08x", clb);
- PX_WREG(i, AHCI_PX_CLB, clb);
- g_assert_cmphex(clb, ==, PX_RREG(i, AHCI_PX_CLB));
-
- /* PxFB space ... 0x100, as in 4.2.1 p 35 */
- fb = guest_alloc(guest_malloc, 0x100);
- g_test_message("FB: 0x%08x", fb);
- PX_WREG(i, AHCI_PX_FB, fb);
- g_assert_cmphex(fb, ==, PX_RREG(i, AHCI_PX_FB));
-
- /* Clear PxSERR, PxIS, then IS.IPS[x] by writing '1's. */
- PX_WREG(i, AHCI_PX_SERR, 0xFFFFFFFF);
- PX_WREG(i, AHCI_PX_IS, 0xFFFFFFFF);
- AHCI_WREG(AHCI_IS, (1 << i));
-
- /* Verify Interrupts Cleared */
- reg = PX_RREG(i, AHCI_PX_SERR);
- g_assert_cmphex(reg, ==, 0);
-
- reg = PX_RREG(i, AHCI_PX_IS);
- g_assert_cmphex(reg, ==, 0);
-
- reg = AHCI_RREG(AHCI_IS);
- ASSERT_BIT_CLEAR(reg, (1 << i));
-
- /* Enable All Interrupts: */
- PX_WREG(i, AHCI_PX_IE, 0xFFFFFFFF);
- reg = PX_RREG(i, AHCI_PX_IE);
- g_assert_cmphex(reg, ==, ~((uint32_t)AHCI_PX_IE_RESERVED));
-
- /* Enable the FIS Receive Engine. */
- PX_SET(i, AHCI_PX_CMD, AHCI_PX_CMD_FRE);
- reg = PX_RREG(i, AHCI_PX_CMD);
- ASSERT_BIT_SET(reg, AHCI_PX_CMD_FR);
-
- /* AHCI 1.3 spec: if !STS.BSY, !STS.DRQ and PxSSTS.DET indicates
- * physical presence, a device is present and may be started. However,
- * PxSERR.DIAG.X /may/ need to be cleared a priori. */
- reg = PX_RREG(i, AHCI_PX_SERR);
- if (BITSET(reg, AHCI_PX_SERR_DIAG_X)) {
- PX_SET(i, AHCI_PX_SERR, AHCI_PX_SERR_DIAG_X);
- }
-
- reg = PX_RREG(i, AHCI_PX_TFD);
- if (BITCLR(reg, AHCI_PX_TFD_STS_BSY | AHCI_PX_TFD_STS_DRQ)) {
- reg = PX_RREG(i, AHCI_PX_SSTS);
- if ((reg & AHCI_PX_SSTS_DET) == SSTS_DET_ESTABLISHED) {
- /* Device Found: set PxCMD.ST := 1 */
- PX_SET(i, AHCI_PX_CMD, AHCI_PX_CMD_ST);
- ASSERT_BIT_SET(PX_RREG(i, AHCI_PX_CMD), AHCI_PX_CMD_CR);
- g_test_message("Started Device %u", i);
- } else if ((reg & AHCI_PX_SSTS_DET)) {
- /* Device present, but in some unknown state. */
- g_assert_not_reached();
- }
- }
- }
+ QOSState *qs = ahci->parent;
- /* Enable GHC.IE */
- AHCI_SET(AHCI_GHC, AHCI_GHC_IE);
- reg = AHCI_RREG(AHCI_GHC);
- ASSERT_BIT_SET(reg, AHCI_GHC_IE);
-
- /* TODO: The device should now be idling and waiting for commands.
- * In the future, a small test-case to inspect the Register D2H FIS
- * and clear the initial interrupts might be good. */
+ ahci_clean_mem(ahci);
+ free_ahci_device(ahci->dev);
+ g_free(ahci);
+ qtest_shutdown(qs);
}
/*** Specification Adherence Tests ***/
@@ -685,14 +112,14 @@ static void ahci_hba_enable(QPCIDevice *ahci, void *hba_base)
/**
* Implementation for test_pci_spec. Ensures PCI configuration space is sane.
*/
-static void ahci_test_pci_spec(QPCIDevice *ahci)
+static void ahci_test_pci_spec(AHCIQState *ahci)
{
uint8_t datab;
uint16_t data;
uint32_t datal;
/* Most of these bits should start cleared until we turn them on. */
- data = qpci_config_readw(ahci, PCI_COMMAND);
+ data = qpci_config_readw(ahci->dev, PCI_COMMAND);
ASSERT_BIT_CLEAR(data, PCI_COMMAND_MEMORY);
ASSERT_BIT_CLEAR(data, PCI_COMMAND_MASTER);
ASSERT_BIT_CLEAR(data, PCI_COMMAND_SPECIAL); /* Reserved */
@@ -704,7 +131,7 @@ static void ahci_test_pci_spec(QPCIDevice *ahci)
ASSERT_BIT_CLEAR(data, PCI_COMMAND_INTX_DISABLE);
ASSERT_BIT_CLEAR(data, 0xF800); /* Reserved */
- data = qpci_config_readw(ahci, PCI_STATUS);
+ data = qpci_config_readw(ahci->dev, PCI_STATUS);
ASSERT_BIT_CLEAR(data, 0x01 | 0x02 | 0x04); /* Reserved */
ASSERT_BIT_CLEAR(data, PCI_STATUS_INTERRUPT);
ASSERT_BIT_SET(data, PCI_STATUS_CAP_LIST); /* must be set */
@@ -717,7 +144,7 @@ static void ahci_test_pci_spec(QPCIDevice *ahci)
ASSERT_BIT_CLEAR(data, PCI_STATUS_DETECTED_PARITY);
/* RID occupies the low byte, CCs occupy the high three. */
- datal = qpci_config_readl(ahci, PCI_CLASS_REVISION);
+ datal = qpci_config_readl(ahci->dev, PCI_CLASS_REVISION);
if (ahci_pedantic) {
/* AHCI 1.3 specifies that at-boot, the RID should reset to 0x00,
* Though in practice this is likely seldom true. */
@@ -740,40 +167,40 @@ static void ahci_test_pci_spec(QPCIDevice *ahci)
g_assert_not_reached();
}
- datab = qpci_config_readb(ahci, PCI_CACHE_LINE_SIZE);
+ datab = qpci_config_readb(ahci->dev, PCI_CACHE_LINE_SIZE);
g_assert_cmphex(datab, ==, 0);
- datab = qpci_config_readb(ahci, PCI_LATENCY_TIMER);
+ datab = qpci_config_readb(ahci->dev, PCI_LATENCY_TIMER);
g_assert_cmphex(datab, ==, 0);
/* Only the bottom 7 bits must be off. */
- datab = qpci_config_readb(ahci, PCI_HEADER_TYPE);
+ datab = qpci_config_readb(ahci->dev, PCI_HEADER_TYPE);
ASSERT_BIT_CLEAR(datab, 0x7F);
/* BIST is optional, but the low 7 bits must always start off regardless. */
- datab = qpci_config_readb(ahci, PCI_BIST);
+ datab = qpci_config_readb(ahci->dev, PCI_BIST);
ASSERT_BIT_CLEAR(datab, 0x7F);
/* BARS 0-4 do not have a boot spec, but ABAR/BAR5 must be clean. */
- datal = qpci_config_readl(ahci, PCI_BASE_ADDRESS_5);
+ datal = qpci_config_readl(ahci->dev, PCI_BASE_ADDRESS_5);
g_assert_cmphex(datal, ==, 0);
- qpci_config_writel(ahci, PCI_BASE_ADDRESS_5, 0xFFFFFFFF);
- datal = qpci_config_readl(ahci, PCI_BASE_ADDRESS_5);
+ qpci_config_writel(ahci->dev, PCI_BASE_ADDRESS_5, 0xFFFFFFFF);
+ datal = qpci_config_readl(ahci->dev, PCI_BASE_ADDRESS_5);
/* ABAR must be 32-bit, memory mapped, non-prefetchable and
* must be >= 512 bytes. To that end, bits 0-8 must be off. */
ASSERT_BIT_CLEAR(datal, 0xFF);
/* Capability list MUST be present, */
- datal = qpci_config_readl(ahci, PCI_CAPABILITY_LIST);
+ datal = qpci_config_readl(ahci->dev, PCI_CAPABILITY_LIST);
/* But these bits are reserved. */
ASSERT_BIT_CLEAR(datal, ~0xFF);
g_assert_cmphex(datal, !=, 0);
/* Check specification adherence for capability extensions. */
- data = qpci_config_readw(ahci, datal);
+ data = qpci_config_readw(ahci->dev, datal);
- switch (ahci_fingerprint) {
+ switch (ahci->fingerprint) {
case AHCI_INTEL_ICH9:
/* Intel ICH9 Family Datasheet 14.1.19 p.550 */
g_assert_cmphex((data & 0xFF), ==, PCI_CAP_ID_MSI);
@@ -786,18 +213,18 @@ static void ahci_test_pci_spec(QPCIDevice *ahci)
ahci_test_pci_caps(ahci, data, (uint8_t)datal);
/* Reserved. */
- datal = qpci_config_readl(ahci, PCI_CAPABILITY_LIST + 4);
+ datal = qpci_config_readl(ahci->dev, PCI_CAPABILITY_LIST + 4);
g_assert_cmphex(datal, ==, 0);
/* IPIN might vary, but ILINE must be off. */
- datab = qpci_config_readb(ahci, PCI_INTERRUPT_LINE);
+ datab = qpci_config_readb(ahci->dev, PCI_INTERRUPT_LINE);
g_assert_cmphex(datab, ==, 0);
}
/**
* Test PCI capabilities for AHCI specification adherence.
*/
-static void ahci_test_pci_caps(QPCIDevice *ahci, uint16_t header,
+static void ahci_test_pci_caps(AHCIQState *ahci, uint16_t header,
uint8_t offset)
{
uint8_t cid = header & 0xFF;
@@ -821,14 +248,14 @@ static void ahci_test_pci_caps(QPCIDevice *ahci, uint16_t header,
}
if (next) {
- ahci_test_pci_caps(ahci, qpci_config_readw(ahci, next), next);
+ ahci_test_pci_caps(ahci, qpci_config_readw(ahci->dev, next), next);
}
}
/**
 * Test SATA PCI capability for AHCI specification adherence.
*/
-static void ahci_test_satacap(QPCIDevice *ahci, uint8_t offset)
+static void ahci_test_satacap(AHCIQState *ahci, uint8_t offset)
{
uint16_t dataw;
uint32_t datal;
@@ -836,11 +263,11 @@ static void ahci_test_satacap(QPCIDevice *ahci, uint8_t offset)
g_test_message("Verifying SATACAP");
/* Assert that the SATACAP version is 1.0, and reserved bits are empty. */
- dataw = qpci_config_readw(ahci, offset + 2);
+ dataw = qpci_config_readw(ahci->dev, offset + 2);
g_assert_cmphex(dataw, ==, 0x10);
/* Grab the SATACR1 register. */
- datal = qpci_config_readw(ahci, offset + 4);
+ datal = qpci_config_readw(ahci->dev, offset + 4);
switch (datal & 0x0F) {
case 0x04: /* BAR0 */
@@ -863,30 +290,30 @@ static void ahci_test_satacap(QPCIDevice *ahci, uint8_t offset)
/**
* Test MSI PCI capability for AHCI specification adherence.
*/
-static void ahci_test_msicap(QPCIDevice *ahci, uint8_t offset)
+static void ahci_test_msicap(AHCIQState *ahci, uint8_t offset)
{
uint16_t dataw;
uint32_t datal;
g_test_message("Verifying MSICAP");
- dataw = qpci_config_readw(ahci, offset + PCI_MSI_FLAGS);
+ dataw = qpci_config_readw(ahci->dev, offset + PCI_MSI_FLAGS);
ASSERT_BIT_CLEAR(dataw, PCI_MSI_FLAGS_ENABLE);
ASSERT_BIT_CLEAR(dataw, PCI_MSI_FLAGS_QSIZE);
ASSERT_BIT_CLEAR(dataw, PCI_MSI_FLAGS_RESERVED);
- datal = qpci_config_readl(ahci, offset + PCI_MSI_ADDRESS_LO);
+ datal = qpci_config_readl(ahci->dev, offset + PCI_MSI_ADDRESS_LO);
g_assert_cmphex(datal, ==, 0);
if (dataw & PCI_MSI_FLAGS_64BIT) {
g_test_message("MSICAP is 64bit");
- datal = qpci_config_readl(ahci, offset + PCI_MSI_ADDRESS_HI);
+ datal = qpci_config_readl(ahci->dev, offset + PCI_MSI_ADDRESS_HI);
g_assert_cmphex(datal, ==, 0);
- dataw = qpci_config_readw(ahci, offset + PCI_MSI_DATA_64);
+ dataw = qpci_config_readw(ahci->dev, offset + PCI_MSI_DATA_64);
g_assert_cmphex(dataw, ==, 0);
} else {
g_test_message("MSICAP is 32bit");
- dataw = qpci_config_readw(ahci, offset + PCI_MSI_DATA_32);
+ dataw = qpci_config_readw(ahci->dev, offset + PCI_MSI_DATA_32);
g_assert_cmphex(dataw, ==, 0);
}
}
@@ -894,36 +321,34 @@ static void ahci_test_msicap(QPCIDevice *ahci, uint8_t offset)
/**
* Test Power Management PCI capability for AHCI specification adherence.
*/
-static void ahci_test_pmcap(QPCIDevice *ahci, uint8_t offset)
+static void ahci_test_pmcap(AHCIQState *ahci, uint8_t offset)
{
uint16_t dataw;
g_test_message("Verifying PMCAP");
- dataw = qpci_config_readw(ahci, offset + PCI_PM_PMC);
+ dataw = qpci_config_readw(ahci->dev, offset + PCI_PM_PMC);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CAP_PME_CLOCK);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CAP_RESERVED);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CAP_D1);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CAP_D2);
- dataw = qpci_config_readw(ahci, offset + PCI_PM_CTRL);
+ dataw = qpci_config_readw(ahci->dev, offset + PCI_PM_CTRL);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CTRL_STATE_MASK);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CTRL_RESERVED);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CTRL_DATA_SEL_MASK);
ASSERT_BIT_CLEAR(dataw, PCI_PM_CTRL_DATA_SCALE_MASK);
}
-static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
+static void ahci_test_hba_spec(AHCIQState *ahci)
{
- HBACap hcap;
unsigned i;
- uint32_t cap, cap2, reg;
+ uint32_t reg;
uint32_t ports;
uint8_t nports_impl;
uint8_t maxports;
- g_assert(ahci != 0);
- g_assert(hba_base != 0);
+ g_assert(ahci != NULL);
/*
* Note that the AHCI spec does expect the BIOS to set up a few things:
@@ -946,15 +371,15 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
*/
/* 1 CAP - Capabilities Register */
- cap = AHCI_RREG(AHCI_CAP);
- ASSERT_BIT_CLEAR(cap, AHCI_CAP_RESERVED);
+ ahci->cap = ahci_rreg(ahci, AHCI_CAP);
+ ASSERT_BIT_CLEAR(ahci->cap, AHCI_CAP_RESERVED);
/* 2 GHC - Global Host Control */
- reg = AHCI_RREG(AHCI_GHC);
+ reg = ahci_rreg(ahci, AHCI_GHC);
ASSERT_BIT_CLEAR(reg, AHCI_GHC_HR);
ASSERT_BIT_CLEAR(reg, AHCI_GHC_IE);
ASSERT_BIT_CLEAR(reg, AHCI_GHC_MRSM);
- if (BITSET(cap, AHCI_CAP_SAM)) {
+ if (BITSET(ahci->cap, AHCI_CAP_SAM)) {
g_test_message("Supports AHCI-Only Mode: GHC_AE is Read-Only.");
ASSERT_BIT_SET(reg, AHCI_GHC_AE);
} else {
@@ -963,27 +388,27 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
}
/* 3 IS - Interrupt Status */
- reg = AHCI_RREG(AHCI_IS);
+ reg = ahci_rreg(ahci, AHCI_IS);
g_assert_cmphex(reg, ==, 0);
/* 4 PI - Ports Implemented */
- ports = AHCI_RREG(AHCI_PI);
+ ports = ahci_rreg(ahci, AHCI_PI);
/* Ports Implemented must be non-zero. */
g_assert_cmphex(ports, !=, 0);
/* Ports Implemented must be <= Number of Ports. */
nports_impl = ctpopl(ports);
- g_assert_cmpuint(((AHCI_CAP_NP & cap) + 1), >=, nports_impl);
+ g_assert_cmpuint(((AHCI_CAP_NP & ahci->cap) + 1), >=, nports_impl);
- g_assert_cmphex(barsize, >, 0);
/* Ports must be within the proper range. Given a mapping of SIZE,
* 256 bytes are used for global HBA control, and the rest is used
* for ports data, at 0x80 bytes each. */
- maxports = (barsize - HBA_DATA_REGION_SIZE) / HBA_PORT_DATA_SIZE;
+ g_assert_cmphex(ahci->barsize, >, 0);
+ maxports = (ahci->barsize - HBA_DATA_REGION_SIZE) / HBA_PORT_DATA_SIZE;
/* e.g, 30 ports for 4K of memory. (4096 - 256) / 128 = 30 */
g_assert_cmphex((reg >> maxports), ==, 0);
/* 5 AHCI Version */
- reg = AHCI_RREG(AHCI_VS);
+ reg = ahci_rreg(ahci, AHCI_VS);
switch (reg) {
case AHCI_VERSION_0_95:
case AHCI_VERSION_1_0:
@@ -996,8 +421,8 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
}
/* 6 Command Completion Coalescing Control: depends on CAP.CCCS. */
- reg = AHCI_RREG(AHCI_CCCCTL);
- if (BITSET(cap, AHCI_CAP_CCCS)) {
+ reg = ahci_rreg(ahci, AHCI_CCCCTL);
+ if (BITSET(ahci->cap, AHCI_CAP_CCCS)) {
ASSERT_BIT_CLEAR(reg, AHCI_CCCCTL_EN);
ASSERT_BIT_CLEAR(reg, AHCI_CCCCTL_RESERVED);
ASSERT_BIT_SET(reg, AHCI_CCCCTL_CC);
@@ -1007,19 +432,19 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
}
/* 7 CCC_PORTS */
- reg = AHCI_RREG(AHCI_CCCPORTS);
+ reg = ahci_rreg(ahci, AHCI_CCCPORTS);
/* Must be zeroes initially regardless of CAP.CCCS */
g_assert_cmphex(reg, ==, 0);
/* 8 EM_LOC */
- reg = AHCI_RREG(AHCI_EMLOC);
- if (BITCLR(cap, AHCI_CAP_EMS)) {
+ reg = ahci_rreg(ahci, AHCI_EMLOC);
+ if (BITCLR(ahci->cap, AHCI_CAP_EMS)) {
g_assert_cmphex(reg, ==, 0);
}
/* 9 EM_CTL */
- reg = AHCI_RREG(AHCI_EMCTL);
- if (BITSET(cap, AHCI_CAP_EMS)) {
+ reg = ahci_rreg(ahci, AHCI_EMCTL);
+ if (BITSET(ahci->cap, AHCI_CAP_EMS)) {
ASSERT_BIT_CLEAR(reg, AHCI_EMCTL_STSMR);
ASSERT_BIT_CLEAR(reg, AHCI_EMCTL_CTLTM);
ASSERT_BIT_CLEAR(reg, AHCI_EMCTL_CTLRST);
@@ -1029,25 +454,25 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
}
/* 10 CAP2 -- Capabilities Extended */
- cap2 = AHCI_RREG(AHCI_CAP2);
- ASSERT_BIT_CLEAR(cap2, AHCI_CAP2_RESERVED);
+ ahci->cap2 = ahci_rreg(ahci, AHCI_CAP2);
+ ASSERT_BIT_CLEAR(ahci->cap2, AHCI_CAP2_RESERVED);
/* 11 BOHC -- Bios/OS Handoff Control */
- reg = AHCI_RREG(AHCI_BOHC);
+ reg = ahci_rreg(ahci, AHCI_BOHC);
g_assert_cmphex(reg, ==, 0);
/* 12 -- 23: Reserved */
g_test_message("Verifying HBA reserved area is empty.");
for (i = AHCI_RESERVED; i < AHCI_NVMHCI; ++i) {
- reg = AHCI_RREG(i);
+ reg = ahci_rreg(ahci, i);
g_assert_cmphex(reg, ==, 0);
}
/* 24 -- 39: NVMHCI */
- if (BITCLR(cap2, AHCI_CAP2_NVMP)) {
+ if (BITCLR(ahci->cap2, AHCI_CAP2_NVMP)) {
g_test_message("Verifying HBA/NVMHCI area is empty.");
for (i = AHCI_NVMHCI; i < AHCI_VENDOR; ++i) {
- reg = AHCI_RREG(i);
+ reg = ahci_rreg(ahci, i);
g_assert_cmphex(reg, ==, 0);
}
}
@@ -1055,17 +480,15 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
/* 40 -- 63: Vendor */
g_test_message("Verifying HBA/Vendor area is empty.");
for (i = AHCI_VENDOR; i < AHCI_PORTS; ++i) {
- reg = AHCI_RREG(i);
+ reg = ahci_rreg(ahci, i);
g_assert_cmphex(reg, ==, 0);
}
/* 64 -- XX: Port Space */
- hcap.cap = cap;
- hcap.cap2 = cap2;
for (i = 0; ports || (i < maxports); ports >>= 1, ++i) {
if (BITSET(ports, 0x1)) {
g_test_message("Testing port %u for spec", i);
- ahci_test_port_spec(ahci, hba_base, &hcap, i);
+ ahci_test_port_spec(ahci, i);
} else {
uint16_t j;
uint16_t low = AHCI_PORTS + (32 * i);
@@ -1074,7 +497,7 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
"(reg [%u-%u]) is empty.",
i, low, high - 1);
for (j = low; j < high; ++j) {
- reg = AHCI_RREG(j);
+ reg = ahci_rreg(ahci, j);
g_assert_cmphex(reg, ==, 0);
}
}
@@ -1084,42 +507,41 @@ static void ahci_test_hba_spec(QPCIDevice *ahci, void *hba_base)
/**
* Test the memory space for one port for specification adherence.
*/
-static void ahci_test_port_spec(QPCIDevice *ahci, void *hba_base,
- HBACap *hcap, uint8_t port)
+static void ahci_test_port_spec(AHCIQState *ahci, uint8_t port)
{
uint32_t reg;
unsigned i;
/* (0) CLB */
- reg = PX_RREG(port, AHCI_PX_CLB);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_CLB);
ASSERT_BIT_CLEAR(reg, AHCI_PX_CLB_RESERVED);
/* (1) CLBU */
- if (BITCLR(hcap->cap, AHCI_CAP_S64A)) {
- reg = PX_RREG(port, AHCI_PX_CLBU);
+ if (BITCLR(ahci->cap, AHCI_CAP_S64A)) {
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_CLBU);
g_assert_cmphex(reg, ==, 0);
}
/* (2) FB */
- reg = PX_RREG(port, AHCI_PX_FB);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_FB);
ASSERT_BIT_CLEAR(reg, AHCI_PX_FB_RESERVED);
/* (3) FBU */
- if (BITCLR(hcap->cap, AHCI_CAP_S64A)) {
- reg = PX_RREG(port, AHCI_PX_FBU);
+ if (BITCLR(ahci->cap, AHCI_CAP_S64A)) {
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_FBU);
g_assert_cmphex(reg, ==, 0);
}
/* (4) IS */
- reg = PX_RREG(port, AHCI_PX_IS);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_IS);
g_assert_cmphex(reg, ==, 0);
/* (5) IE */
- reg = PX_RREG(port, AHCI_PX_IE);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_IE);
g_assert_cmphex(reg, ==, 0);
/* (6) CMD */
- reg = PX_RREG(port, AHCI_PX_CMD);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_CMD);
ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_FRE);
ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_RESERVED);
ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_CCS);
@@ -1141,7 +563,7 @@ static void ahci_test_port_spec(QPCIDevice *ahci, void *hba_base,
ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_MPSS);
}
/* If we do not support MPS, MPSS and MPSP must be off. */
- if (BITCLR(hcap->cap, AHCI_CAP_SMPS)) {
+ if (BITCLR(ahci->cap, AHCI_CAP_SMPS)) {
ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_MPSS);
ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_MPSP);
}
@@ -1152,16 +574,16 @@ static void ahci_test_port_spec(QPCIDevice *ahci, void *hba_base,
/* HPCP and ESP cannot both be active. */
g_assert(!BITSET(reg, AHCI_PX_CMD_HPCP | AHCI_PX_CMD_ESP));
/* If CAP.FBSS is not set, FBSCP must not be set. */
- if (BITCLR(hcap->cap, AHCI_CAP_FBSS)) {
+ if (BITCLR(ahci->cap, AHCI_CAP_FBSS)) {
ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_FBSCP);
}
/* (7) RESERVED */
- reg = PX_RREG(port, AHCI_PX_RES1);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_RES1);
g_assert_cmphex(reg, ==, 0);
/* (8) TFD */
- reg = PX_RREG(port, AHCI_PX_TFD);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_TFD);
/* At boot, prior to an FIS being received, the TFD register should be 0x7F,
* which breaks down as follows, as seen in AHCI 1.3 sec 3.3.8, p. 27. */
ASSERT_BIT_SET(reg, AHCI_PX_TFD_STS_ERR);
@@ -1179,53 +601,53 @@ static void ahci_test_port_spec(QPCIDevice *ahci, void *hba_base,
* so we cannot expect a value here. AHCI 1.3, sec 3.3.9, pp 27-28 */
/* (10) SSTS / SCR0: SStatus */
- reg = PX_RREG(port, AHCI_PX_SSTS);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_SSTS);
ASSERT_BIT_CLEAR(reg, AHCI_PX_SSTS_RESERVED);
/* Even though the register should be 0 at boot, it is asynchronous and
* prone to change, so we cannot test any well known value. */
/* (11) SCTL / SCR2: SControl */
- reg = PX_RREG(port, AHCI_PX_SCTL);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_SCTL);
g_assert_cmphex(reg, ==, 0);
/* (12) SERR / SCR1: SError */
- reg = PX_RREG(port, AHCI_PX_SERR);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_SERR);
g_assert_cmphex(reg, ==, 0);
/* (13) SACT / SCR3: SActive */
- reg = PX_RREG(port, AHCI_PX_SACT);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_SACT);
g_assert_cmphex(reg, ==, 0);
/* (14) CI */
- reg = PX_RREG(port, AHCI_PX_CI);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_CI);
g_assert_cmphex(reg, ==, 0);
/* (15) SNTF */
- reg = PX_RREG(port, AHCI_PX_SNTF);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_SNTF);
g_assert_cmphex(reg, ==, 0);
/* (16) FBS */
- reg = PX_RREG(port, AHCI_PX_FBS);
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_FBS);
ASSERT_BIT_CLEAR(reg, AHCI_PX_FBS_EN);
ASSERT_BIT_CLEAR(reg, AHCI_PX_FBS_DEC);
ASSERT_BIT_CLEAR(reg, AHCI_PX_FBS_SDE);
ASSERT_BIT_CLEAR(reg, AHCI_PX_FBS_DEV);
ASSERT_BIT_CLEAR(reg, AHCI_PX_FBS_DWE);
ASSERT_BIT_CLEAR(reg, AHCI_PX_FBS_RESERVED);
- if (BITSET(hcap->cap, AHCI_CAP_FBSS)) {
+ if (BITSET(ahci->cap, AHCI_CAP_FBSS)) {
/* if Port-Multiplier FIS-based switching is available, ADO must be >= 2 */
g_assert((reg & AHCI_PX_FBS_ADO) >> ctzl(AHCI_PX_FBS_ADO) >= 2);
}
/* [17 -- 27] RESERVED */
for (i = AHCI_PX_RES2; i < AHCI_PX_VS; ++i) {
- reg = PX_RREG(port, i);
+ reg = ahci_px_rreg(ahci, port, i);
g_assert_cmphex(reg, ==, 0);
}
/* [28 -- 31] Vendor-Specific */
for (i = AHCI_PX_VS; i < 32; ++i) {
- reg = PX_RREG(port, i);
+ reg = ahci_px_rreg(ahci, port, i);
if (reg) {
g_test_message("INFO: Vendor register %u non-empty", i);
}
@@ -1236,164 +658,46 @@ static void ahci_test_port_spec(QPCIDevice *ahci, void *hba_base,
* Utilizing an initialized AHCI HBA, issue an IDENTIFY command to the first
* device we see, then read and check the response.
*/
-static void ahci_test_identify(QPCIDevice *ahci, void *hba_base)
+static void ahci_test_identify(AHCIQState *ahci)
{
- RegD2HFIS *d2h = g_malloc0(0x20);
- RegD2HFIS *pio = g_malloc0(0x20);
- RegH2DFIS fis;
- AHCICommand cmd;
- PRD prd;
- uint32_t ports, reg, clb, table, fb, data_ptr;
uint16_t buff[256];
- unsigned i;
+ unsigned px;
int rc;
+ uint16_t sect_size;
+ const size_t buffsize = 512;
g_assert(ahci != NULL);
- g_assert(hba_base != NULL);
-
- /* We need to:
- * (1) Create a Command Table Buffer and update the Command List Slot #0
- * to point to this buffer.
- * (2) Construct an FIS host-to-device command structure, and write it to
- * the top of the command table buffer.
- * (3) Create a data buffer for the IDENTIFY response to be sent to
- * (4) Create a Physical Region Descriptor that points to the data buffer,
- * and write it to the bottom (offset 0x80) of the command table.
- * (5) Now, PxCLB points to the command list, command 0 points to
- * our table, and our table contains an FIS instruction and a
- * PRD that points to our rx buffer.
- * (6) We inform the HBA via PxCI that there is a command ready in slot #0.
+
+ /**
+ * This serves as a bit of a tutorial on AHCI device programming:
+ *
+ * (1) Create a data buffer for the IDENTIFY response to be sent to
+ * (2) Create a Command Table buffer, where we will store the
+ * command and PRDT (Physical Region Descriptor Table)
+ * (3) Construct an FIS host-to-device command structure, and write it to
+ * the top of the Command Table buffer.
+ * (4) Create one or more Physical Region Descriptors (PRDs) that describe
+ * a location in memory where data may be stored/retrieved.
+ * (5) Write these PRDs to the bottom (offset 0x80) of the Command Table.
+ * (6) Each AHCI port has up to 32 command slots. Each slot contains a
+ * header that points to a Command Table buffer. Pick an unused slot
+ * and update it to point to the Command Table we have built.
+ * (7) Now: Command #n points to our Command Table, and our Command Table
+ * contains the FIS (that describes our command) and the PRDT, which
+ * describes our buffer.
+ * (8) We inform the HBA via PxCI (Command Issue) that the command in slot
+ * #n is ready for processing.
*/
/* Pick the first implemented and running port */
- ports = AHCI_RREG(AHCI_PI);
- for (i = 0; i < 32; ports >>= 1, ++i) {
- if (ports == 0) {
- i = 32;
- }
-
- if (!(ports & 0x01)) {
- continue;
- }
-
- reg = PX_RREG(i, AHCI_PX_CMD);
- if (BITSET(reg, AHCI_PX_CMD_ST)) {
- break;
- }
- }
- g_assert_cmphex(i, <, 32);
- g_test_message("Selected port %u for test", i);
-
- /* Clear out this port's interrupts (ignore the init register d2h fis) */
- reg = PX_RREG(i, AHCI_PX_IS);
- PX_WREG(i, AHCI_PX_IS, reg);
- g_assert_cmphex(PX_RREG(i, AHCI_PX_IS), ==, 0);
-
- /* Wipe the FIS-Receive Buffer */
- fb = PX_RREG(i, AHCI_PX_FB);
- g_assert_cmphex(fb, !=, 0);
- qmemset(fb, 0x00, 0x100);
-
- /* Create a Command Table buffer. 0x80 is the smallest with a PRDTL of 0. */
- /* We need at least one PRD, so round up to the nearest 0x80 multiple. */
- table = guest_alloc(guest_malloc, CMD_TBL_SIZ(1));
- g_assert(table);
- ASSERT_BIT_CLEAR(table, 0x7F);
-
- /* Create a data buffer ... where we will dump the IDENTIFY data to. */
- data_ptr = guest_alloc(guest_malloc, 512);
- g_assert(data_ptr);
-
- /* Grab the Command List Buffer pointer */
- clb = PX_RREG(i, AHCI_PX_CLB);
- g_assert(clb);
-
- /* Copy the existing Command #0 structure from the CLB into local memory,
- * and build a new command #0. */
- memread(clb, &cmd, sizeof(cmd));
- cmd.b1 = 5; /* reg_h2d_fis is 5 double-words long */
- cmd.b2 = 0x04; /* clear PxTFD.STS.BSY when done */
- cmd.prdtl = cpu_to_le16(1); /* One PRD table entry. */
- cmd.prdbc = 0;
- cmd.ctba = cpu_to_le32(table);
- cmd.ctbau = 0;
-
- /* Construct our PRD, noting that DBC is 0-indexed. */
- prd.dba = cpu_to_le32(data_ptr);
- prd.dbau = 0;
- prd.res = 0;
- /* 511+1 bytes, request DPS interrupt */
- prd.dbc = cpu_to_le32(511 | 0x80000000);
-
- /* Construct our Command FIS, Based on http://wiki.osdev.org/AHCI */
- memset(&fis, 0x00, sizeof(fis));
- fis.fis_type = 0x27; /* Register Host-to-Device FIS */
- fis.command = 0xEC; /* IDENTIFY */
- fis.device = 0;
- fis.flags = 0x80; /* Indicate this is a command FIS */
-
- /* We've committed nothing yet, no interrupts should be posted yet. */
- g_assert_cmphex(PX_RREG(i, AHCI_PX_IS), ==, 0);
-
- /* Commit the Command FIS to the Command Table */
- memwrite(table, &fis, sizeof(fis));
-
- /* Commit the PRD entry to the Command Table */
- memwrite(table + 0x80, &prd, sizeof(prd));
-
- /* Commit Command #0, pointing to the Table, to the Command List Buffer. */
- memwrite(clb, &cmd, sizeof(cmd));
-
- /* Everything is in place, but we haven't given the go-ahead yet. */
- g_assert_cmphex(PX_RREG(i, AHCI_PX_IS), ==, 0);
-
- /* Issue Command #0 via PxCI */
- PX_WREG(i, AHCI_PX_CI, (1 << 0));
- while (BITSET(PX_RREG(i, AHCI_PX_TFD), AHCI_PX_TFD_STS_BSY)) {
- usleep(50);
- }
+ px = ahci_port_select(ahci);
+ g_test_message("Selected port %u for test", px);
- /* Check for expected interrupts */
- reg = PX_RREG(i, AHCI_PX_IS);
- ASSERT_BIT_SET(reg, AHCI_PX_IS_DHRS);
- ASSERT_BIT_SET(reg, AHCI_PX_IS_PSS);
- /* BUG: we expect AHCI_PX_IS_DPS to be set. */
- ASSERT_BIT_CLEAR(reg, AHCI_PX_IS_DPS);
+ /* Clear out the FIS Receive area and any pending interrupts. */
+ ahci_port_clear(ahci, px);
- /* Clear expected interrupts and assert all interrupts now cleared. */
- PX_WREG(i, AHCI_PX_IS, AHCI_PX_IS_DHRS | AHCI_PX_IS_PSS | AHCI_PX_IS_DPS);
- g_assert_cmphex(PX_RREG(i, AHCI_PX_IS), ==, 0);
-
- /* Check for errors. */
- reg = PX_RREG(i, AHCI_PX_SERR);
- g_assert_cmphex(reg, ==, 0);
- reg = PX_RREG(i, AHCI_PX_TFD);
- ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_STS_ERR);
- ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_ERR);
-
- /* Investigate CMD #0, assert that we read 512 bytes */
- memread(clb, &cmd, sizeof(cmd));
- g_assert_cmphex(512, ==, le32_to_cpu(cmd.prdbc));
-
- /* Investigate FIS responses */
- memread(fb + 0x20, pio, 0x20);
- memread(fb + 0x40, d2h, 0x20);
- g_assert_cmphex(pio->fis_type, ==, 0x5f);
- g_assert_cmphex(d2h->fis_type, ==, 0x34);
- g_assert_cmphex(pio->flags, ==, d2h->flags);
- g_assert_cmphex(pio->status, ==, d2h->status);
- g_assert_cmphex(pio->error, ==, d2h->error);
-
- reg = PX_RREG(i, AHCI_PX_TFD);
- g_assert_cmphex((reg & AHCI_PX_TFD_ERR), ==, pio->error);
- g_assert_cmphex((reg & AHCI_PX_TFD_STS), ==, pio->status);
- /* The PIO Setup FIS contains a "bytes read" field, which is a
- * 16-bit value. The Physical Region Descriptor Byte Count is
- * 32-bit, but for small transfers using one PRD, it should match. */
- g_assert_cmphex(le16_to_cpu(pio->res4), ==, le32_to_cpu(cmd.prdbc));
-
- /* Last, but not least: Investigate the IDENTIFY response data. */
- memread(data_ptr, &buff, 512);
+ /* "Read" 512 bytes using CMD_IDENTIFY into the host buffer. */
+ ahci_io(ahci, px, CMD_IDENTIFY, &buff, buffsize);
/* Check serial number/version in the buffer */
/* NB: IDENTIFY strings are packed in 16bit little endian chunks.
@@ -1408,8 +712,48 @@ static void ahci_test_identify(QPCIDevice *ahci, void *hba_base)
rc = memcmp(&buff[23], "version ", 8);
g_assert_cmphex(rc, ==, 0);
- g_free(d2h);
- g_free(pio);
+ sect_size = le16_to_cpu(*((uint16_t *)(&buff[5])));
+ g_assert_cmphex(sect_size, ==, 0x200);
+}
+
+static void ahci_test_dma_rw_simple(AHCIQState *ahci)
+{
+ uint64_t ptr;
+ uint8_t port;
+ unsigned i;
+ const unsigned bufsize = 4096;
+ unsigned char *tx = g_malloc(bufsize);
+ unsigned char *rx = g_malloc0(bufsize);
+
+ g_assert(ahci != NULL);
+
+ /* Pick the first running port and clear it. */
+ port = ahci_port_select(ahci);
+ ahci_port_clear(ahci, port);
+
+ /*** Create pattern and transfer to guest ***/
+ /* Data buffer in the guest */
+ ptr = ahci_alloc(ahci, bufsize);
+ g_assert(ptr);
+
+ /* Write some indicative pattern to our 4K buffer. */
+ for (i = 0; i < bufsize; i++) {
+ tx[i] = (bufsize - i);
+ }
+ memwrite(ptr, tx, bufsize);
+
+ /* Write this buffer to disk, then read it back to the DMA buffer. */
+ ahci_guest_io(ahci, port, CMD_WRITE_DMA, ptr, bufsize);
+ qmemset(ptr, 0x00, bufsize);
+ ahci_guest_io(ahci, port, CMD_READ_DMA, ptr, bufsize);
+
+ /*** Read back the Data ***/
+ memread(ptr, rx, bufsize);
+ g_assert_cmphex(memcmp(tx, rx, bufsize), ==, 0);
+
+ ahci_free(ahci, ptr);
+ g_free(tx);
+ g_free(rx);
}
/******************************************************************************/
@@ -1421,7 +765,7 @@ static void ahci_test_identify(QPCIDevice *ahci, void *hba_base)
*/
static void test_sanity(void)
{
- QPCIDevice *ahci;
+ AHCIQState *ahci;
ahci = ahci_boot();
ahci_shutdown(ahci);
}
@@ -1432,7 +776,7 @@ static void test_sanity(void)
*/
static void test_pci_spec(void)
{
- QPCIDevice *ahci;
+ AHCIQState *ahci;
ahci = ahci_boot();
ahci_test_pci_spec(ahci);
ahci_shutdown(ahci);
@@ -1444,10 +788,10 @@ static void test_pci_spec(void)
*/
static void test_pci_enable(void)
{
- QPCIDevice *ahci;
- void *hba_base;
+ AHCIQState *ahci;
+
ahci = ahci_boot();
- ahci_pci_enable(ahci, &hba_base);
+ ahci_pci_enable(ahci);
ahci_shutdown(ahci);
}
@@ -1457,12 +801,11 @@ static void test_pci_enable(void)
*/
static void test_hba_spec(void)
{
- QPCIDevice *ahci;
- void *hba_base;
+ AHCIQState *ahci;
ahci = ahci_boot();
- ahci_pci_enable(ahci, &hba_base);
- ahci_test_hba_spec(ahci, hba_base);
+ ahci_pci_enable(ahci);
+ ahci_test_hba_spec(ahci);
ahci_shutdown(ahci);
}
@@ -1472,12 +815,11 @@ static void test_hba_spec(void)
*/
static void test_hba_enable(void)
{
- QPCIDevice *ahci;
- void *hba_base;
+ AHCIQState *ahci;
ahci = ahci_boot();
- ahci_pci_enable(ahci, &hba_base);
- ahci_hba_enable(ahci, hba_base);
+ ahci_pci_enable(ahci);
+ ahci_hba_enable(ahci);
ahci_shutdown(ahci);
}
@@ -1487,13 +829,26 @@ static void test_hba_enable(void)
*/
static void test_identify(void)
{
- QPCIDevice *ahci;
- void *hba_base;
+ AHCIQState *ahci;
+
+ ahci = ahci_boot();
+ ahci_pci_enable(ahci);
+ ahci_hba_enable(ahci);
+ ahci_test_identify(ahci);
+ ahci_shutdown(ahci);
+}
+
+/**
+ * Perform a simple DMA R/W test, using a single PRD and non-NCQ commands.
+ */
+static void test_dma_rw_simple(void)
+{
+ AHCIQState *ahci;
ahci = ahci_boot();
- ahci_pci_enable(ahci, &hba_base);
- ahci_hba_enable(ahci, hba_base);
- ahci_test_identify(ahci, hba_base);
+ ahci_pci_enable(ahci);
+ ahci_hba_enable(ahci);
+ ahci_test_dma_rw_simple(ahci);
ahci_shutdown(ahci);
}
@@ -1552,6 +907,7 @@ int main(int argc, char **argv)
qtest_add_func("/ahci/hba_spec", test_hba_spec);
qtest_add_func("/ahci/hba_enable", test_hba_enable);
qtest_add_func("/ahci/identify", test_identify);
+ qtest_add_func("/ahci/dma/simple", test_dma_rw_simple);
ret = g_test_run();
diff --git a/tests/libqos/ahci.c b/tests/libqos/ahci.c
new file mode 100644
index 0000000000..a6105c750f
--- /dev/null
+++ b/tests/libqos/ahci.c
@@ -0,0 +1,838 @@
+/*
+ * libqos AHCI functions
+ *
+ * Copyright (c) 2014 John Snow <jsnow@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <glib.h>
+
+#include "libqtest.h"
+#include "libqos/ahci.h"
+#include "libqos/pci-pc.h"
+
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+
+#include "hw/pci/pci_ids.h"
+#include "hw/pci/pci_regs.h"
+
+typedef struct AHCICommandProp {
+ uint8_t cmd; /* Command Code */
+ bool data; /* Data transfer command? */
+ bool pio;
+ bool dma;
+ bool lba28;
+ bool lba48;
+ bool read;
+ bool write;
+ bool atapi;
+ bool ncq;
+ uint64_t size; /* Static transfer size, for commands like IDENTIFY. */
+ uint32_t interrupts; /* Expected interrupts for this command. */
+} AHCICommandProp;
+
+AHCICommandProp ahci_command_properties[] = {
+ { .cmd = CMD_READ_PIO, .data = true, .pio = true,
+ .lba28 = true, .read = true },
+ { .cmd = CMD_WRITE_PIO, .data = true, .pio = true,
+ .lba28 = true, .write = true },
+ { .cmd = CMD_READ_PIO_EXT, .data = true, .pio = true,
+ .lba48 = true, .read = true },
+ { .cmd = CMD_WRITE_PIO_EXT, .data = true, .pio = true,
+ .lba48 = true, .write = true },
+ { .cmd = CMD_READ_DMA, .data = true, .dma = true,
+ .lba28 = true, .read = true },
+ { .cmd = CMD_WRITE_DMA, .data = true, .dma = true,
+ .lba28 = true, .write = true },
+ { .cmd = CMD_READ_DMA_EXT, .data = true, .dma = true,
+ .lba48 = true, .read = true },
+ { .cmd = CMD_WRITE_DMA_EXT, .data = true, .dma = true,
+ .lba48 = true, .write = true },
+ { .cmd = CMD_IDENTIFY, .data = true, .pio = true,
+ .size = 512, .read = true },
+ { .cmd = CMD_READ_MAX, .lba28 = true },
+ { .cmd = CMD_READ_MAX_EXT, .lba48 = true },
+ { .cmd = CMD_FLUSH_CACHE, .data = false }
+};
+
+/**
+ * Allocate space in the guest using information in the AHCIQState object.
+ */
+uint64_t ahci_alloc(AHCIQState *ahci, size_t bytes)
+{
+ g_assert(ahci);
+ g_assert(ahci->parent);
+ return qmalloc(ahci->parent, bytes);
+}
+
+void ahci_free(AHCIQState *ahci, uint64_t addr)
+{
+ g_assert(ahci);
+ g_assert(ahci->parent);
+ qfree(ahci->parent, addr);
+}
+
+/**
+ * Locate, verify, and return a handle to the AHCI device.
+ */
+QPCIDevice *get_ahci_device(uint32_t *fingerprint)
+{
+ QPCIDevice *ahci;
+ uint32_t ahci_fingerprint;
+ QPCIBus *pcibus;
+
+ pcibus = qpci_init_pc();
+
+ /* Find the AHCI PCI device and verify it's the right one. */
+ ahci = qpci_device_find(pcibus, QPCI_DEVFN(0x1F, 0x02));
+ g_assert(ahci != NULL);
+
+ ahci_fingerprint = qpci_config_readl(ahci, PCI_VENDOR_ID);
+
+ switch (ahci_fingerprint) {
+ case AHCI_INTEL_ICH9:
+ break;
+ default:
+ /* Unknown device. */
+ g_assert_not_reached();
+ }
+
+ if (fingerprint) {
+ *fingerprint = ahci_fingerprint;
+ }
+ return ahci;
+}
+
+void free_ahci_device(QPCIDevice *dev)
+{
+ QPCIBus *pcibus = dev ? dev->bus : NULL;
+
+ /* libqos doesn't have a function for this, so free it manually */
+ g_free(dev);
+ qpci_free_pc(pcibus);
+}
+
+/* Free all memory in-use by the AHCI device. */
+void ahci_clean_mem(AHCIQState *ahci)
+{
+ uint8_t port, slot;
+
+ for (port = 0; port < 32; ++port) {
+ if (ahci->port[port].fb) {
+ ahci_free(ahci, ahci->port[port].fb);
+ }
+ if (ahci->port[port].clb) {
+ for (slot = 0; slot < 32; slot++) {
+ ahci_destroy_command(ahci, port, slot);
+ }
+ ahci_free(ahci, ahci->port[port].clb);
+ }
+ }
+}
+
+/*** Logical Device Initialization ***/
+
+/**
+ * Start the PCI device and sanity-check default operation.
+ */
+void ahci_pci_enable(AHCIQState *ahci)
+{
+ uint8_t reg;
+
+ start_ahci_device(ahci);
+
+ switch (ahci->fingerprint) {
+ case AHCI_INTEL_ICH9:
+ /* ICH9 has a register at PCI 0x92 that
+ * acts as a master port enabler mask. */
+ reg = qpci_config_readb(ahci->dev, 0x92);
+ reg |= 0x3F;
+ qpci_config_writeb(ahci->dev, 0x92, reg);
+ /* 0...0111111b -- bit significant, ports 0-5 enabled. */
+ ASSERT_BIT_SET(qpci_config_readb(ahci->dev, 0x92), 0x3F);
+ break;
+ }
+
+}
+
+/**
+ * Map BAR5/ABAR, and engage the PCI device.
+ */
+void start_ahci_device(AHCIQState *ahci)
+{
+ /* Map AHCI's ABAR (BAR5) */
+ ahci->hba_base = qpci_iomap(ahci->dev, 5, &ahci->barsize);
+ g_assert(ahci->hba_base);
+
+ /* turns on pci.cmd.iose, pci.cmd.mse and pci.cmd.bme */
+ qpci_device_enable(ahci->dev);
+}
+
+/**
+ * Test and initialize the AHCI's HBA memory areas.
+ * Initialize and start any ports with devices attached.
+ * Bring the HBA into the idle state.
+ */
+void ahci_hba_enable(AHCIQState *ahci)
+{
+ /* Bits of interest in this section:
+ * GHC.AE Global Host Control / AHCI Enable
+ * PxCMD.ST Port Command: Start
+ * PxCMD.SUD "Spin Up Device"
+ * PxCMD.POD "Power On Device"
+ * PxCMD.FRE "FIS Receive Enable"
+ * PxCMD.FR "FIS Receive Running"
+ * PxCMD.CR "Command List Running"
+ */
+ uint32_t reg, ports_impl;
+ uint16_t i;
+ uint8_t num_cmd_slots;
+
+ g_assert(ahci != NULL);
+
+ /* Set GHC.AE to 1 */
+ ahci_set(ahci, AHCI_GHC, AHCI_GHC_AE);
+ reg = ahci_rreg(ahci, AHCI_GHC);
+ ASSERT_BIT_SET(reg, AHCI_GHC_AE);
+
+ /* Cache CAP and CAP2. */
+ ahci->cap = ahci_rreg(ahci, AHCI_CAP);
+ ahci->cap2 = ahci_rreg(ahci, AHCI_CAP2);
+
+ /* Read CAP.NCS, how many command slots do we have? */
+ num_cmd_slots = ((ahci->cap & AHCI_CAP_NCS) >> ctzl(AHCI_CAP_NCS)) + 1;
+ g_test_message("Number of Command Slots: %u", num_cmd_slots);
+
+ /* Determine which ports are implemented. */
+ ports_impl = ahci_rreg(ahci, AHCI_PI);
+
+ for (i = 0; ports_impl; ports_impl >>= 1, ++i) {
+ if (!(ports_impl & 0x01)) {
+ continue;
+ }
+
+ g_test_message("Initializing port %u", i);
+
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_CMD);
+ if (BITCLR(reg, AHCI_PX_CMD_ST | AHCI_PX_CMD_CR |
+ AHCI_PX_CMD_FRE | AHCI_PX_CMD_FR)) {
+ g_test_message("port is idle");
+ } else {
+ g_test_message("port needs to be idled");
+ ahci_px_clr(ahci, i, AHCI_PX_CMD,
+ (AHCI_PX_CMD_ST | AHCI_PX_CMD_FRE));
+ /* The port has 500ms to disengage. */
+ usleep(500000);
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_CMD);
+ ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_CR);
+ ASSERT_BIT_CLEAR(reg, AHCI_PX_CMD_FR);
+ g_test_message("port is now idle");
+ /* The spec does allow for possibly needing a PORT RESET
+ * or HBA reset if we fail to idle the port. */
+ }
+
+ /* Allocate Memory for the Command List Buffer & FIS Buffer */
+ /* PxCLB space ... 0x20 per command, as in 4.2.2 p 36 */
+ ahci->port[i].clb = ahci_alloc(ahci, num_cmd_slots * 0x20);
+ qmemset(ahci->port[i].clb, 0x00, 0x100);
+ g_test_message("CLB: 0x%08" PRIx64, ahci->port[i].clb);
+ ahci_px_wreg(ahci, i, AHCI_PX_CLB, ahci->port[i].clb);
+ g_assert_cmphex(ahci->port[i].clb, ==,
+ ahci_px_rreg(ahci, i, AHCI_PX_CLB));
+
+ /* PxFB space ... 0x100, as in 4.2.1 p 35 */
+ ahci->port[i].fb = ahci_alloc(ahci, 0x100);
+ qmemset(ahci->port[i].fb, 0x00, 0x100);
+ g_test_message("FB: 0x%08" PRIx64, ahci->port[i].fb);
+ ahci_px_wreg(ahci, i, AHCI_PX_FB, ahci->port[i].fb);
+ g_assert_cmphex(ahci->port[i].fb, ==,
+ ahci_px_rreg(ahci, i, AHCI_PX_FB));
+
+ /* Clear PxSERR, PxIS, then IS.IPS[x] by writing '1's. */
+ ahci_px_wreg(ahci, i, AHCI_PX_SERR, 0xFFFFFFFF);
+ ahci_px_wreg(ahci, i, AHCI_PX_IS, 0xFFFFFFFF);
+ ahci_wreg(ahci, AHCI_IS, (1 << i));
+
+ /* Verify Interrupts Cleared */
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_SERR);
+ g_assert_cmphex(reg, ==, 0);
+
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_IS);
+ g_assert_cmphex(reg, ==, 0);
+
+ reg = ahci_rreg(ahci, AHCI_IS);
+ ASSERT_BIT_CLEAR(reg, (1 << i));
+
+ /* Enable All Interrupts: */
+ ahci_px_wreg(ahci, i, AHCI_PX_IE, 0xFFFFFFFF);
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_IE);
+ g_assert_cmphex(reg, ==, ~((uint32_t)AHCI_PX_IE_RESERVED));
+
+ /* Enable the FIS Receive Engine. */
+ ahci_px_set(ahci, i, AHCI_PX_CMD, AHCI_PX_CMD_FRE);
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_CMD);
+ ASSERT_BIT_SET(reg, AHCI_PX_CMD_FR);
+
+ /* AHCI 1.3 spec: if !STS.BSY, !STS.DRQ and PxSSTS.DET indicates
+ * physical presence, a device is present and may be started. However,
+ * PxSERR.DIAG.X /may/ need to be cleared a priori. */
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_SERR);
+ if (BITSET(reg, AHCI_PX_SERR_DIAG_X)) {
+ ahci_px_set(ahci, i, AHCI_PX_SERR, AHCI_PX_SERR_DIAG_X);
+ }
+
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_TFD);
+ if (BITCLR(reg, AHCI_PX_TFD_STS_BSY | AHCI_PX_TFD_STS_DRQ)) {
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_SSTS);
+ if ((reg & AHCI_PX_SSTS_DET) == SSTS_DET_ESTABLISHED) {
+ /* Device Found: set PxCMD.ST := 1 */
+ ahci_px_set(ahci, i, AHCI_PX_CMD, AHCI_PX_CMD_ST);
+ ASSERT_BIT_SET(ahci_px_rreg(ahci, i, AHCI_PX_CMD),
+ AHCI_PX_CMD_CR);
+ g_test_message("Started Device %u", i);
+ } else if ((reg & AHCI_PX_SSTS_DET)) {
+ /* Device present, but in some unknown state. */
+ g_assert_not_reached();
+ }
+ }
+ }
+
+ /* Enable GHC.IE */
+ ahci_set(ahci, AHCI_GHC, AHCI_GHC_IE);
+ reg = ahci_rreg(ahci, AHCI_GHC);
+ ASSERT_BIT_SET(reg, AHCI_GHC_IE);
+
+ /* TODO: The device should now be idling and waiting for commands.
+ * In the future, a small test-case to inspect the Register D2H FIS
+ * and clear the initial interrupts might be good. */
+}
+
+/**
+ * Pick the first implemented and running port
+ */
+unsigned ahci_port_select(AHCIQState *ahci)
+{
+ uint32_t ports, reg;
+ unsigned i;
+
+ ports = ahci_rreg(ahci, AHCI_PI);
+ for (i = 0; i < 32; ports >>= 1, ++i) {
+ if (ports == 0) {
+ i = 32;
+ }
+
+ if (!(ports & 0x01)) {
+ continue;
+ }
+
+ reg = ahci_px_rreg(ahci, i, AHCI_PX_CMD);
+ if (BITSET(reg, AHCI_PX_CMD_ST)) {
+ break;
+ }
+ }
+ g_assert(i < 32);
+ return i;
+}
+
+/**
+ * Clear a port's interrupts and status information prior to a test.
+ */
+void ahci_port_clear(AHCIQState *ahci, uint8_t port)
+{
+ uint32_t reg;
+
+ /* Clear out this port's interrupts (ignore the init register d2h fis) */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_IS);
+ ahci_px_wreg(ahci, port, AHCI_PX_IS, reg);
+ g_assert_cmphex(ahci_px_rreg(ahci, port, AHCI_PX_IS), ==, 0);
+
+ /* Wipe the FIS-Receive Buffer */
+ qmemset(ahci->port[port].fb, 0x00, 0x100);
+}
+
+/**
+ * Check a port for errors.
+ */
+void ahci_port_check_error(AHCIQState *ahci, uint8_t port)
+{
+ uint32_t reg;
+
+ /* The upper 9 bits of the IS register all indicate errors. */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_IS);
+ reg >>= 23;
+ g_assert_cmphex(reg, ==, 0);
+
+ /* The SATA Error Register should be empty. */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_SERR);
+ g_assert_cmphex(reg, ==, 0);
+
+ /* The TFD also has two error sections. */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_TFD);
+ ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_STS_ERR);
+ ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_ERR);
+}
+
+void ahci_port_check_interrupts(AHCIQState *ahci, uint8_t port,
+ uint32_t intr_mask)
+{
+ uint32_t reg;
+
+ /* Check for expected interrupts */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_IS);
+ ASSERT_BIT_SET(reg, intr_mask);
+
+ /* Clear expected interrupts and assert all interrupts now cleared. */
+ ahci_px_wreg(ahci, port, AHCI_PX_IS, intr_mask);
+ g_assert_cmphex(ahci_px_rreg(ahci, port, AHCI_PX_IS), ==, 0);
+}
+
+void ahci_port_check_nonbusy(AHCIQState *ahci, uint8_t port, uint8_t slot)
+{
+ uint32_t reg;
+
+ /* Assert that the command slot is no longer busy (NCQ) */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_SACT);
+ ASSERT_BIT_CLEAR(reg, (1 << slot));
+
+ /* Non-NCQ */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_CI);
+ ASSERT_BIT_CLEAR(reg, (1 << slot));
+
+ /* And assert that we are generally not busy. */
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_TFD);
+ ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_STS_BSY);
+ ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_STS_DRQ);
+}
+
+void ahci_port_check_d2h_sanity(AHCIQState *ahci, uint8_t port, uint8_t slot)
+{
+ RegD2HFIS *d2h = g_malloc0(0x20);
+ uint32_t reg;
+
+ memread(ahci->port[port].fb + 0x40, d2h, 0x20);
+ g_assert_cmphex(d2h->fis_type, ==, 0x34);
+
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_TFD);
+ g_assert_cmphex((reg & AHCI_PX_TFD_ERR) >> 8, ==, d2h->error);
+ g_assert_cmphex((reg & AHCI_PX_TFD_STS), ==, d2h->status);
+
+ g_free(d2h);
+}
+
+void ahci_port_check_pio_sanity(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, size_t buffsize)
+{
+ PIOSetupFIS *pio = g_malloc0(0x20);
+
+ /* We cannot check the Status or E_Status registers, because
+ * the status may have again changed between the PIO Setup FIS
+ * and the conclusion of the command with the D2H Register FIS. */
+ memread(ahci->port[port].fb + 0x20, pio, 0x20);
+ g_assert_cmphex(pio->fis_type, ==, 0x5f);
+
+ /* BUG: PIO Setup FIS as utilized by QEMU tries to fit the entire
+ * transfer size in a uint16_t field. The maximum transfer size can
+ * eclipse this; the field is meant to convey the size of data per
+ * each Data FIS, not the entire operation as a whole. For now,
+ * we will sanity check the broken case where applicable. */
+ if (buffsize <= UINT16_MAX) {
+ g_assert_cmphex(le16_to_cpu(pio->tx_count), ==, buffsize);
+ }
+
+ g_free(pio);
+}
+
+void ahci_port_check_cmd_sanity(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, size_t buffsize)
+{
+ AHCICommandHeader cmd;
+
+ ahci_get_command_header(ahci, port, slot, &cmd);
+ g_assert_cmphex(buffsize, ==, cmd.prdbc);
+}
+
+/* Get the command in #slot of port #port. */
+void ahci_get_command_header(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, AHCICommandHeader *cmd)
+{
+ uint64_t ba = ahci->port[port].clb;
+ ba += slot * sizeof(AHCICommandHeader);
+ memread(ba, cmd, sizeof(AHCICommandHeader));
+
+ cmd->flags = le16_to_cpu(cmd->flags);
+ cmd->prdtl = le16_to_cpu(cmd->prdtl);
+ cmd->prdbc = le32_to_cpu(cmd->prdbc);
+ cmd->ctba = le64_to_cpu(cmd->ctba);
+}
+
+/* Set the command in #slot of port #port. */
+void ahci_set_command_header(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, AHCICommandHeader *cmd)
+{
+ AHCICommandHeader tmp;
+ uint64_t ba = ahci->port[port].clb;
+ ba += slot * sizeof(AHCICommandHeader);
+
+ tmp.flags = cpu_to_le16(cmd->flags);
+ tmp.prdtl = cpu_to_le16(cmd->prdtl);
+ tmp.prdbc = cpu_to_le32(cmd->prdbc);
+ tmp.ctba = cpu_to_le64(cmd->ctba);
+
+ memwrite(ba, &tmp, sizeof(AHCICommandHeader));
+}
+
+void ahci_destroy_command(AHCIQState *ahci, uint8_t port, uint8_t slot)
+{
+ AHCICommandHeader cmd;
+
+ /* Obtain the Nth Command Header */
+ ahci_get_command_header(ahci, port, slot, &cmd);
+ if (cmd.ctba == 0) {
+ /* No address in it, so just return -- it's empty. */
+ goto tidy;
+ }
+
+ /* Free the Table */
+ ahci_free(ahci, cmd.ctba);
+
+ tidy:
+ /* NULL the header. */
+ memset(&cmd, 0x00, sizeof(cmd));
+ ahci_set_command_header(ahci, port, slot, &cmd);
+ ahci->port[port].ctba[slot] = 0;
+ ahci->port[port].prdtl[slot] = 0;
+}
+
+void ahci_write_fis(AHCIQState *ahci, RegH2DFIS *fis, uint64_t addr)
+{
+ RegH2DFIS tmp = *fis;
+
+ /* The auxiliary FIS fields are defined per-command and are not
+ * currently implemented in libqos/ahci.o, but may or may not need
+ * to be flipped. */
+
+ /* All other FIS fields are 8 bit and do not need to be flipped. */
+ tmp.count = cpu_to_le16(tmp.count);
+
+ memwrite(addr, &tmp, sizeof(tmp));
+}
+
+unsigned ahci_pick_cmd(AHCIQState *ahci, uint8_t port)
+{
+ unsigned i;
+ unsigned j;
+ uint32_t reg;
+
+ reg = ahci_px_rreg(ahci, port, AHCI_PX_CI);
+
+ /* Pick the least recently used command slot that's available */
+ for (i = 0; i < 32; ++i) {
+ j = ((ahci->port[port].next + i) % 32);
+ if (reg & (1 << j)) {
+ continue;
+ }
+ ahci_destroy_command(ahci, port, j);
+ ahci->port[port].next = (j + 1) % 32;
+ return j;
+ }
+
+ g_test_message("All command slots were busy.");
+ g_assert_not_reached();
+}
+
+inline unsigned size_to_prdtl(unsigned bytes, unsigned bytes_per_prd)
+{
+ /* Each PRD can describe up to 4MiB */
+ g_assert_cmphex(bytes_per_prd, <=, 4096 * 1024);
+ g_assert_cmphex(bytes_per_prd & 0x01, ==, 0x00);
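+ /* Round up: e.g. 8192 bytes at 4096 bytes per PRD needs 2 entries; 8193 needs 3. */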
+ return (bytes + bytes_per_prd - 1) / bytes_per_prd;
+}
+
+/* Given a guest buffer address, perform an IO operation */
+void ahci_guest_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd,
+ uint64_t buffer, size_t bufsize)
+{
+ AHCICommand *cmd;
+
+ cmd = ahci_command_create(ide_cmd);
+ ahci_command_set_buffer(cmd, buffer);
+ ahci_command_set_size(cmd, bufsize);
+ ahci_command_commit(ahci, cmd, port);
+ ahci_command_issue(ahci, cmd);
+ ahci_command_verify(ahci, cmd);
+ ahci_command_free(cmd);
+}
+
+struct AHCICommand {
+ /* Test Management Data */
+ uint8_t name;
+ uint8_t port;
+ uint8_t slot;
+ uint32_t interrupts;
+ uint64_t xbytes;
+ uint32_t prd_size;
+ uint64_t buffer;
+ AHCICommandProp *props;
+ /* Data to be transferred to the guest */
+ AHCICommandHeader header;
+ RegH2DFIS fis;
+ void *atapi_cmd;
+};
+
+static AHCICommandProp *ahci_command_find(uint8_t command_name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ahci_command_properties); i++) {
+ if (ahci_command_properties[i].cmd == command_name) {
+ return &ahci_command_properties[i];
+ }
+ }
+
+ return NULL;
+}
+
+/* Given a HOST buffer, allocate a guest buffer, copy data as needed, and perform an IO operation. */
+void ahci_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd,
+ void *buffer, size_t bufsize)
+{
+ uint64_t ptr;
+ AHCICommandProp *props;
+
+ props = ahci_command_find(ide_cmd);
+ g_assert(props);
+ ptr = ahci_alloc(ahci, bufsize);
+ g_assert(ptr);
+
+ if (props->write) {
+ memwrite(ptr, buffer, bufsize);
+ }
+
+ ahci_guest_io(ahci, port, ide_cmd, ptr, bufsize);
+
+ if (props->read) {
+ memread(ptr, buffer, bufsize);
+ }
+
+ ahci_free(ahci, ptr);
+}
+
+/**
+ * Initializes a basic command header in memory.
+ * We assume that this is for an ATA command using RegH2DFIS.
+ */
+static void command_header_init(AHCICommand *cmd)
+{
+ AHCICommandHeader *hdr = &cmd->header;
+ AHCICommandProp *props = cmd->props;
+
+ hdr->flags = 5; /* RegH2DFIS is 5 DW long. Must be < 32 */
+ hdr->flags |= CMDH_CLR_BSY; /* Clear the BSY bit when done */
+ if (props->write) {
+ hdr->flags |= CMDH_WRITE;
+ }
+ if (props->atapi) {
+ hdr->flags |= CMDH_ATAPI;
+ }
+ /* Other flags: PREFETCH, RESET, and BIST */
+ hdr->prdtl = size_to_prdtl(cmd->xbytes, cmd->prd_size);
+ hdr->prdbc = 0;
+ hdr->ctba = 0;
+}
+
+static void command_table_init(AHCICommand *cmd)
+{
+ RegH2DFIS *fis = &(cmd->fis);
+
+ fis->fis_type = REG_H2D_FIS;
+ fis->flags = REG_H2D_FIS_CMD; /* "Command" bit */
+ fis->command = cmd->name;
+ cmd->fis.feature_low = 0x00;
+ cmd->fis.feature_high = 0x00;
+ if (cmd->props->lba28 || cmd->props->lba48) {
+ cmd->fis.device = ATA_DEVICE_LBA;
+ }
+ cmd->fis.count = (cmd->xbytes / AHCI_SECTOR_SIZE);
+ cmd->fis.icc = 0x00;
+ cmd->fis.control = 0x00;
+ memset(cmd->fis.aux, 0x00, ARRAY_SIZE(cmd->fis.aux));
+}
+
+AHCICommand *ahci_command_create(uint8_t command_name)
+{
+ AHCICommandProp *props = ahci_command_find(command_name);
+ AHCICommand *cmd;
+
+ g_assert(props);
+ cmd = g_malloc0(sizeof(AHCICommand));
+ g_assert(!(props->dma && props->pio));
+ g_assert(!(props->lba28 && props->lba48));
+ g_assert(!(props->read && props->write));
+ g_assert(!props->size || props->data);
+
+ /* Defaults and book-keeping */
+ cmd->props = props;
+ cmd->name = command_name;
+ cmd->xbytes = props->size;
+ cmd->prd_size = 4096;
+ cmd->buffer = 0xabad1dea;
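+ /* Poison default; callers provide a real guest address via ahci_command_set_buffer(). */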
+
+ cmd->interrupts = AHCI_PX_IS_DHRS;
+ /* BUG: We expect the DPS interrupt for data commands */
+ /* cmd->interrupts |= props->data ? AHCI_PX_IS_DPS : 0; */
+ /* BUG: We expect the DMA Setup interrupt for DMA commands */
+ /* cmd->interrupts |= props->dma ? AHCI_PX_IS_DSS : 0; */
+ cmd->interrupts |= props->pio ? AHCI_PX_IS_PSS : 0;
+
+ command_header_init(cmd);
+ command_table_init(cmd);
+
+ return cmd;
+}
+
+void ahci_command_free(AHCICommand *cmd)
+{
+ g_free(cmd);
+}
+
+void ahci_command_set_buffer(AHCICommand *cmd, uint64_t buffer)
+{
+ cmd->buffer = buffer;
+}
+
+void ahci_command_set_sizes(AHCICommand *cmd, uint64_t xbytes,
+ unsigned prd_size)
+{
+ /* Each PRD can describe up to 4MiB, and the PRD size must be even. */
+ g_assert_cmphex(prd_size, <=, 4096 * 1024);
+ g_assert_cmphex(prd_size & 0x01, ==, 0x00);
+ cmd->prd_size = prd_size;
+ cmd->xbytes = xbytes;
+ cmd->fis.count = (cmd->xbytes / AHCI_SECTOR_SIZE);
+ cmd->header.prdtl = size_to_prdtl(cmd->xbytes, cmd->prd_size);
+}
+
+void ahci_command_set_size(AHCICommand *cmd, uint64_t xbytes)
+{
+ ahci_command_set_sizes(cmd, xbytes, cmd->prd_size);
+}
+
+void ahci_command_set_prd_size(AHCICommand *cmd, unsigned prd_size)
+{
+ ahci_command_set_sizes(cmd, cmd->xbytes, prd_size);
+}
+
+void ahci_command_commit(AHCIQState *ahci, AHCICommand *cmd, uint8_t port)
+{
+ uint16_t i, prdtl;
+ uint64_t table_size, table_ptr, remaining;
+ PRD prd;
+
+ /* This command is now tied to this port/command slot */
+ cmd->port = port;
+ cmd->slot = ahci_pick_cmd(ahci, port);
+
+ /* Create a buffer for the command table */
+ prdtl = size_to_prdtl(cmd->xbytes, cmd->prd_size);
+ table_size = CMD_TBL_SIZ(prdtl);
+ table_ptr = ahci_alloc(ahci, table_size);
+ g_assert(table_ptr);
+ /* AHCI 1.3: Must be aligned to 0x80 */
+ g_assert((table_ptr & 0x7F) == 0x00);
+ cmd->header.ctba = table_ptr;
+
+ /* Commit the command header and command FIS */
+ ahci_set_command_header(ahci, port, cmd->slot, &(cmd->header));
+ ahci_write_fis(ahci, &(cmd->fis), table_ptr);
+
+ /* Construct and write the PRDs to the command table */
+ g_assert_cmphex(prdtl, ==, cmd->header.prdtl);
+ remaining = cmd->xbytes;
+ for (i = 0; i < prdtl; ++i) {
+ prd.dba = cpu_to_le64(cmd->buffer + (cmd->prd_size * i));
+ prd.res = 0;
+ if (remaining > cmd->prd_size) {
+ /* Note that byte count is 0-based. */
+ prd.dbc = cpu_to_le32(cmd->prd_size - 1);
+ remaining -= cmd->prd_size;
+ } else {
+ /* Again, dbc is 0-based. */
+ prd.dbc = cpu_to_le32(remaining - 1);
+ remaining = 0;
+ }
+ prd.dbc |= cpu_to_le32(0x80000000); /* Request DPS Interrupt */
+
+ /* Commit the PRD entry to the Command Table */
+ memwrite(table_ptr + 0x80 + (i * sizeof(PRD)),
+ &prd, sizeof(PRD));
+ }
+
+ /* Bookmark the PRDTL and CTBA values */
+ ahci->port[port].ctba[cmd->slot] = table_ptr;
+ ahci->port[port].prdtl[cmd->slot] = prdtl;
+}
+
+void ahci_command_issue_async(AHCIQState *ahci, AHCICommand *cmd)
+{
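+ /* NCQ commands must be tagged in SACT before the slot is set in CI. */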
+ if (cmd->props->ncq) {
+ ahci_px_wreg(ahci, cmd->port, AHCI_PX_SACT, (1 << cmd->slot));
+ }
+
+ ahci_px_wreg(ahci, cmd->port, AHCI_PX_CI, (1 << cmd->slot));
+}
+
+void ahci_command_wait(AHCIQState *ahci, AHCICommand *cmd)
+{
+ /* We can't rely on STS_BSY until the command has started processing.
+ * Therefore, we also use the Command Issue bit as an indication of
+ * a command in flight. */
+ while (BITSET(ahci_px_rreg(ahci, cmd->port, AHCI_PX_TFD),
+ AHCI_PX_TFD_STS_BSY) ||
+ BITSET(ahci_px_rreg(ahci, cmd->port, AHCI_PX_CI), (1 << cmd->slot))) {
+ usleep(50);
+ }
+}
+
+void ahci_command_issue(AHCIQState *ahci, AHCICommand *cmd)
+{
+ ahci_command_issue_async(ahci, cmd);
+ ahci_command_wait(ahci, cmd);
+}
+
+void ahci_command_verify(AHCIQState *ahci, AHCICommand *cmd)
+{
+ uint8_t slot = cmd->slot;
+ uint8_t port = cmd->port;
+
+ ahci_port_check_error(ahci, port);
+ ahci_port_check_interrupts(ahci, port, cmd->interrupts);
+ ahci_port_check_nonbusy(ahci, port, slot);
+ ahci_port_check_cmd_sanity(ahci, port, slot, cmd->xbytes);
+ ahci_port_check_d2h_sanity(ahci, port, slot);
+ if (cmd->props->pio) {
+ ahci_port_check_pio_sanity(ahci, port, slot, cmd->xbytes);
+ }
+}
+
+uint8_t ahci_command_slot(AHCICommand *cmd)
+{
+ return cmd->slot;
+}
diff --git a/tests/libqos/ahci.h b/tests/libqos/ahci.h
new file mode 100644
index 0000000000..39b99d3658
--- /dev/null
+++ b/tests/libqos/ahci.h
@@ -0,0 +1,549 @@
+#ifndef __libqos_ahci_h
+#define __libqos_ahci_h
+
+/*
+ * AHCI qtest library functions and definitions
+ *
+ * Copyright (c) 2014 John Snow <jsnow@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include "libqos/libqos.h"
+#include "libqos/pci.h"
+#include "libqos/malloc-pc.h"
+
+/*** Supplementary PCI Config Space IDs & Masks ***/
+#define PCI_DEVICE_ID_INTEL_Q35_AHCI (0x2922)
+#define PCI_MSI_FLAGS_RESERVED (0xFF00)
+#define PCI_PM_CTRL_RESERVED (0xFC)
+#define PCI_BCC(REG32) ((REG32) >> 24)
+#define PCI_PI(REG32) (((REG32) >> 8) & 0xFF)
+#define PCI_SCC(REG32) (((REG32) >> 16) & 0xFF)
+
+/*** Recognized AHCI Device Types ***/
+#define AHCI_INTEL_ICH9 (PCI_DEVICE_ID_INTEL_Q35_AHCI << 16 | \
+ PCI_VENDOR_ID_INTEL)
+
+/*** AHCI/HBA Register Offsets and Bitmasks ***/
+#define AHCI_CAP (0)
+#define AHCI_CAP_NP (0x1F)
+#define AHCI_CAP_SXS (0x20)
+#define AHCI_CAP_EMS (0x40)
+#define AHCI_CAP_CCCS (0x80)
+#define AHCI_CAP_NCS (0x1F00)
+#define AHCI_CAP_PSC (0x2000)
+#define AHCI_CAP_SSC (0x4000)
+#define AHCI_CAP_PMD (0x8000)
+#define AHCI_CAP_FBSS (0x10000)
+#define AHCI_CAP_SPM (0x20000)
+#define AHCI_CAP_SAM (0x40000)
+#define AHCI_CAP_RESERVED (0x80000)
+#define AHCI_CAP_ISS (0xF00000)
+#define AHCI_CAP_SCLO (0x1000000)
+#define AHCI_CAP_SAL (0x2000000)
+#define AHCI_CAP_SALP (0x4000000)
+#define AHCI_CAP_SSS (0x8000000)
+#define AHCI_CAP_SMPS (0x10000000)
+#define AHCI_CAP_SSNTF (0x20000000)
+#define AHCI_CAP_SNCQ (0x40000000)
+#define AHCI_CAP_S64A (0x80000000)
+
+#define AHCI_GHC (1)
+#define AHCI_GHC_HR (0x01)
+#define AHCI_GHC_IE (0x02)
+#define AHCI_GHC_MRSM (0x04)
+#define AHCI_GHC_RESERVED (0x7FFFFFF8)
+#define AHCI_GHC_AE (0x80000000)
+
+#define AHCI_IS (2)
+#define AHCI_PI (3)
+#define AHCI_VS (4)
+
+#define AHCI_CCCCTL (5)
+#define AHCI_CCCCTL_EN (0x01)
+#define AHCI_CCCCTL_RESERVED (0x06)
+#define AHCI_CCCCTL_CC (0xFF00)
+#define AHCI_CCCCTL_TV (0xFFFF0000)
+
+#define AHCI_CCCPORTS (6)
+#define AHCI_EMLOC (7)
+
+#define AHCI_EMCTL (8)
+#define AHCI_EMCTL_STSMR (0x01)
+#define AHCI_EMCTL_CTLTM (0x100)
+#define AHCI_EMCTL_CTLRST (0x200)
+#define AHCI_EMCTL_RESERVED (0xF0F0FCFE)
+
+#define AHCI_CAP2 (9)
+#define AHCI_CAP2_BOH (0x01)
+#define AHCI_CAP2_NVMP (0x02)
+#define AHCI_CAP2_APST (0x04)
+#define AHCI_CAP2_RESERVED (0xFFFFFFF8)
+
+#define AHCI_BOHC (10)
+#define AHCI_RESERVED (11)
+#define AHCI_NVMHCI (24)
+#define AHCI_VENDOR (40)
+#define AHCI_PORTS (64)
+
+/*** Port Memory Offsets & Bitmasks ***/
+#define AHCI_PX_CLB (0)
+#define AHCI_PX_CLB_RESERVED (0x1FF)
+
+#define AHCI_PX_CLBU (1)
+
+#define AHCI_PX_FB (2)
+#define AHCI_PX_FB_RESERVED (0xFF)
+
+#define AHCI_PX_FBU (3)
+
+#define AHCI_PX_IS (4)
+#define AHCI_PX_IS_DHRS (0x1)
+#define AHCI_PX_IS_PSS (0x2)
+#define AHCI_PX_IS_DSS (0x4)
+#define AHCI_PX_IS_SDBS (0x8)
+#define AHCI_PX_IS_UFS (0x10)
+#define AHCI_PX_IS_DPS (0x20)
+#define AHCI_PX_IS_PCS (0x40)
+#define AHCI_PX_IS_DMPS (0x80)
+#define AHCI_PX_IS_RESERVED (0x23FFF00)
+#define AHCI_PX_IS_PRCS (0x400000)
+#define AHCI_PX_IS_IPMS (0x800000)
+#define AHCI_PX_IS_OFS (0x1000000)
+#define AHCI_PX_IS_INFS (0x4000000)
+#define AHCI_PX_IS_IFS (0x8000000)
+#define AHCI_PX_IS_HBDS (0x10000000)
+#define AHCI_PX_IS_HBFS (0x20000000)
+#define AHCI_PX_IS_TFES (0x40000000)
+#define AHCI_PX_IS_CPDS (0x80000000)
+
+#define AHCI_PX_IE (5)
+#define AHCI_PX_IE_DHRE (0x1)
+#define AHCI_PX_IE_PSE (0x2)
+#define AHCI_PX_IE_DSE (0x4)
+#define AHCI_PX_IE_SDBE (0x8)
+#define AHCI_PX_IE_UFE (0x10)
+#define AHCI_PX_IE_DPE (0x20)
+#define AHCI_PX_IE_PCE (0x40)
+#define AHCI_PX_IE_DMPE (0x80)
+#define AHCI_PX_IE_RESERVED (0x23FFF00)
+#define AHCI_PX_IE_PRCE (0x400000)
+#define AHCI_PX_IE_IPME (0x800000)
+#define AHCI_PX_IE_OFE (0x1000000)
+#define AHCI_PX_IE_INFE (0x4000000)
+#define AHCI_PX_IE_IFE (0x8000000)
+#define AHCI_PX_IE_HBDE (0x10000000)
+#define AHCI_PX_IE_HBFE (0x20000000)
+#define AHCI_PX_IE_TFEE (0x40000000)
+#define AHCI_PX_IE_CPDE (0x80000000)
+
+#define AHCI_PX_CMD (6)
+#define AHCI_PX_CMD_ST (0x1)
+#define AHCI_PX_CMD_SUD (0x2)
+#define AHCI_PX_CMD_POD (0x4)
+#define AHCI_PX_CMD_CLO (0x8)
+#define AHCI_PX_CMD_FRE (0x10)
+#define AHCI_PX_CMD_RESERVED (0xE0)
+#define AHCI_PX_CMD_CCS (0x1F00)
+#define AHCI_PX_CMD_MPSS (0x2000)
+#define AHCI_PX_CMD_FR (0x4000)
+#define AHCI_PX_CMD_CR (0x8000)
+#define AHCI_PX_CMD_CPS (0x10000)
+#define AHCI_PX_CMD_PMA (0x20000)
+#define AHCI_PX_CMD_HPCP (0x40000)
+#define AHCI_PX_CMD_MPSP (0x80000)
+#define AHCI_PX_CMD_CPD (0x100000)
+#define AHCI_PX_CMD_ESP (0x200000)
+#define AHCI_PX_CMD_FBSCP (0x400000)
+#define AHCI_PX_CMD_APSTE (0x800000)
+#define AHCI_PX_CMD_ATAPI (0x1000000)
+#define AHCI_PX_CMD_DLAE (0x2000000)
+#define AHCI_PX_CMD_ALPE (0x4000000)
+#define AHCI_PX_CMD_ASP (0x8000000)
+#define AHCI_PX_CMD_ICC (0xF0000000)
+
+#define AHCI_PX_RES1 (7)
+
+#define AHCI_PX_TFD (8)
+#define AHCI_PX_TFD_STS (0xFF)
+#define AHCI_PX_TFD_STS_ERR (0x01)
+#define AHCI_PX_TFD_STS_CS1 (0x06)
+#define AHCI_PX_TFD_STS_DRQ (0x08)
+#define AHCI_PX_TFD_STS_CS2 (0x70)
+#define AHCI_PX_TFD_STS_BSY (0x80)
+#define AHCI_PX_TFD_ERR (0xFF00)
+#define AHCI_PX_TFD_RESERVED (0xFFFF0000)
+
+#define AHCI_PX_SIG (9)
+#define AHCI_PX_SIG_SECTOR_COUNT (0xFF)
+#define AHCI_PX_SIG_LBA_LOW (0xFF00)
+#define AHCI_PX_SIG_LBA_MID (0xFF0000)
+#define AHCI_PX_SIG_LBA_HIGH (0xFF000000)
+
+#define AHCI_PX_SSTS (10)
+#define AHCI_PX_SSTS_DET (0x0F)
+#define AHCI_PX_SSTS_SPD (0xF0)
+#define AHCI_PX_SSTS_IPM (0xF00)
+#define AHCI_PX_SSTS_RESERVED (0xFFFFF000)
+#define SSTS_DET_NO_DEVICE (0x00)
+#define SSTS_DET_PRESENT (0x01)
+#define SSTS_DET_ESTABLISHED (0x03)
+#define SSTS_DET_OFFLINE (0x04)
+
+#define AHCI_PX_SCTL (11)
+
+#define AHCI_PX_SERR (12)
+#define AHCI_PX_SERR_ERR (0xFFFF)
+#define AHCI_PX_SERR_DIAG (0xFFFF0000)
+#define AHCI_PX_SERR_DIAG_X (0x04000000)
+
+#define AHCI_PX_SACT (13)
+#define AHCI_PX_CI (14)
+#define AHCI_PX_SNTF (15)
+
+#define AHCI_PX_FBS (16)
+#define AHCI_PX_FBS_EN (0x1)
+#define AHCI_PX_FBS_DEC (0x2)
+#define AHCI_PX_FBS_SDE (0x4)
+#define AHCI_PX_FBS_DEV (0xF00)
+#define AHCI_PX_FBS_ADO (0xF000)
+#define AHCI_PX_FBS_DWE (0xF0000)
+#define AHCI_PX_FBS_RESERVED (0xFFF000F8)
+
+#define AHCI_PX_RES2 (17)
+#define AHCI_PX_VS (28)
+
+#define HBA_DATA_REGION_SIZE (256)
+#define HBA_PORT_DATA_SIZE (128)
+#define HBA_PORT_NUM_REG (HBA_PORT_DATA_SIZE/4)
+
+#define AHCI_VERSION_0_95 (0x00000905)
+#define AHCI_VERSION_1_0 (0x00010000)
+#define AHCI_VERSION_1_1 (0x00010100)
+#define AHCI_VERSION_1_2 (0x00010200)
+#define AHCI_VERSION_1_3 (0x00010300)
+
+#define AHCI_SECTOR_SIZE (512)
+
+/* FIS types */
+enum {
+ REG_H2D_FIS = 0x27,
+ REG_D2H_FIS = 0x34,
+ DMA_ACTIVATE_FIS = 0x39,
+ DMA_SETUP_FIS = 0x41,
+ DATA_FIS = 0x46,
+ BIST_ACTIVATE_FIS = 0x58,
+ PIO_SETUP_FIS = 0x5F,
+ SDB_FIS = 0xA1
+};
+
+/* FIS flags */
+#define REG_H2D_FIS_CMD 0x80
+
+/* ATA Commands */
+enum {
+ /* DMA */
+ CMD_READ_DMA = 0xC8,
+ CMD_READ_DMA_EXT = 0x25,
+ CMD_WRITE_DMA = 0xCA,
+ CMD_WRITE_DMA_EXT = 0x35,
+ /* PIO */
+ CMD_READ_PIO = 0x20,
+ CMD_READ_PIO_EXT = 0x24,
+ CMD_WRITE_PIO = 0x30,
+ CMD_WRITE_PIO_EXT = 0x34,
+ /* Misc */
+ CMD_READ_MAX = 0xF8,
+ CMD_READ_MAX_EXT = 0x27,
+ CMD_FLUSH_CACHE = 0xE7,
+ CMD_IDENTIFY = 0xEC
+};
+
+/* AHCI Command Header Flags & Masks*/
+#define CMDH_CFL (0x1F)
+#define CMDH_ATAPI (0x20)
+#define CMDH_WRITE (0x40)
+#define CMDH_PREFETCH (0x80)
+#define CMDH_RESET (0x100)
+#define CMDH_BIST (0x200)
+#define CMDH_CLR_BSY (0x400)
+#define CMDH_RES (0x800)
+#define CMDH_PMP (0xF000)
+
+/* ATA device register masks */
+#define ATA_DEVICE_MAGIC 0xA0
+#define ATA_DEVICE_LBA 0x40
+#define ATA_DEVICE_DRIVE 0x10
+#define ATA_DEVICE_HEAD 0x0F
+
+/*** Structures ***/
+
+typedef struct AHCIPortQState {
+ uint64_t fb;
+ uint64_t clb;
+ uint64_t ctba[32];
+ uint16_t prdtl[32];
+ uint8_t next; /** Next Command Slot to Use **/
+} AHCIPortQState;
+
+typedef struct AHCIQState {
+ QOSState *parent;
+ QPCIDevice *dev;
+ void *hba_base;
+ uint64_t barsize;
+ uint32_t fingerprint;
+ uint32_t cap;
+ uint32_t cap2;
+ AHCIPortQState port[32];
+} AHCIQState;
+
+/**
+ * Generic FIS structure.
+ */
+typedef struct FIS {
+ uint8_t fis_type;
+ uint8_t flags;
+ char data[0];
+} __attribute__((__packed__)) FIS;
+
+/**
+ * Register device-to-host FIS structure.
+ */
+typedef struct RegD2HFIS {
+ /* DW0 */
+ uint8_t fis_type;
+ uint8_t flags;
+ uint8_t status;
+ uint8_t error;
+ /* DW1 */
+ uint8_t lba_lo[3];
+ uint8_t device;
+ /* DW2 */
+ uint8_t lba_hi[3];
+ uint8_t res0;
+ /* DW3 */
+ uint16_t count;
+ uint16_t res1;
+ /* DW4 */
+ uint32_t res2;
+} __attribute__((__packed__)) RegD2HFIS;
+
+/**
+ * Register device-to-host FIS structure;
+ * PIO Setup variety.
+ */
+typedef struct PIOSetupFIS {
+ /* DW0 */
+ uint8_t fis_type;
+ uint8_t flags;
+ uint8_t status;
+ uint8_t error;
+ /* DW1 */
+ uint8_t lba_lo[3];
+ uint8_t device;
+ /* DW2 */
+ uint8_t lba_hi[3];
+ uint8_t res0;
+ /* DW3 */
+ uint16_t count;
+ uint8_t res1;
+ uint8_t e_status;
+ /* DW4 */
+ uint16_t tx_count;
+ uint16_t res2;
+} __attribute__((__packed__)) PIOSetupFIS;
+
+/**
+ * Register host-to-device FIS structure.
+ */
+typedef struct RegH2DFIS {
+ /* DW0 */
+ uint8_t fis_type;
+ uint8_t flags;
+ uint8_t command;
+ uint8_t feature_low;
+ /* DW1 */
+ uint8_t lba_lo[3];
+ uint8_t device;
+ /* DW2 */
+ uint8_t lba_hi[3];
+ uint8_t feature_high;
+ /* DW3 */
+ uint16_t count;
+ uint8_t icc;
+ uint8_t control;
+ /* DW4 */
+ uint8_t aux[4];
+} __attribute__((__packed__)) RegH2DFIS;
+
+/**
+ * Command List entry structure.
+ * The command list contains between 1 and 32 of these structures.
+ */
+typedef struct AHCICommandHeader {
+ uint16_t flags; /* Cmd-Fis-Len, PMP#, and flags. */
+ uint16_t prdtl; /* Phys Region Desc. Table Length */
+ uint32_t prdbc; /* Phys Region Desc. Byte Count */
+ uint64_t ctba; /* Command Table Descriptor Base Address */
+ uint32_t res[4];
+} __attribute__((__packed__)) AHCICommandHeader;
+
+/**
+ * Physical Region Descriptor; part of the Command Table, which is
+ * pointed to by the Command Header (AHCICommandHeader).
+ */
+typedef struct PRD {
+ uint64_t dba; /* Data Base Address */
+ uint32_t res; /* Reserved */
+ uint32_t dbc; /* Data Byte Count (0-indexed) & Interrupt Flag (bit 31) */
+} __attribute__((__packed__)) PRD;
+
+/* Opaque, defined within ahci.c */
+typedef struct AHCICommand AHCICommand;
+
+/*** Macro Utilities ***/
+#define BITANY(data, mask) (((data) & (mask)) != 0)
+#define BITSET(data, mask) (((data) & (mask)) == (mask))
+#define BITCLR(data, mask) (((data) & (mask)) == 0)
+#define ASSERT_BIT_SET(data, mask) g_assert_cmphex((data) & (mask), ==, (mask))
+#define ASSERT_BIT_CLEAR(data, mask) g_assert_cmphex((data) & (mask), ==, 0)
+
+/* For calculating how big the command table (CFIS/ACMD area plus PRD list) needs to be: */
+#define CMD_TBL_SIZ(n) ((0x80 + ((n) * sizeof(PRD)) + 0x7F) & ~0x7F)
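+/* e.g. CMD_TBL_SIZ(1) == 0x100: the 0x80-byte CFIS/ACMD/reserved area plus one
+ * 16-byte PRD, rounded up to the next 0x80-byte boundary. */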
+
+/* Helpers for reading/writing AHCI HBA register values */
+
+static inline uint32_t ahci_mread(AHCIQState *ahci, size_t offset)
+{
+ return qpci_io_readl(ahci->dev, ahci->hba_base + offset);
+}
+
+static inline void ahci_mwrite(AHCIQState *ahci, size_t offset, uint32_t value)
+{
+ qpci_io_writel(ahci->dev, ahci->hba_base + offset, value);
+}
+
+static inline uint32_t ahci_rreg(AHCIQState *ahci, uint32_t reg_num)
+{
+ return ahci_mread(ahci, 4 * reg_num);
+}
+
+static inline void ahci_wreg(AHCIQState *ahci, uint32_t reg_num, uint32_t value)
+{
+ ahci_mwrite(ahci, 4 * reg_num, value);
+}
+
+static inline void ahci_set(AHCIQState *ahci, uint32_t reg_num, uint32_t mask)
+{
+ ahci_wreg(ahci, reg_num, ahci_rreg(ahci, reg_num) | mask);
+}
+
+static inline void ahci_clr(AHCIQState *ahci, uint32_t reg_num, uint32_t mask)
+{
+ ahci_wreg(ahci, reg_num, ahci_rreg(ahci, reg_num) & ~mask);
+}
+
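+/* Register numbers are DWORD indices: the generic HBA registers occupy the
+ * first AHCI_PORTS (64) DWORDs and each port adds HBA_PORT_NUM_REG (32) more. */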
+static inline size_t ahci_px_offset(uint8_t port, uint32_t reg_num)
+{
+ return AHCI_PORTS + (HBA_PORT_NUM_REG * port) + reg_num;
+}
+
+static inline uint32_t ahci_px_rreg(AHCIQState *ahci, uint8_t port,
+ uint32_t reg_num)
+{
+ return ahci_rreg(ahci, ahci_px_offset(port, reg_num));
+}
+
+static inline void ahci_px_wreg(AHCIQState *ahci, uint8_t port,
+ uint32_t reg_num, uint32_t value)
+{
+ ahci_wreg(ahci, ahci_px_offset(port, reg_num), value);
+}
+
+static inline void ahci_px_set(AHCIQState *ahci, uint8_t port,
+ uint32_t reg_num, uint32_t mask)
+{
+ ahci_px_wreg(ahci, port, reg_num,
+ ahci_px_rreg(ahci, port, reg_num) | mask);
+}
+
+static inline void ahci_px_clr(AHCIQState *ahci, uint8_t port,
+ uint32_t reg_num, uint32_t mask)
+{
+ ahci_px_wreg(ahci, port, reg_num,
+ ahci_px_rreg(ahci, port, reg_num) & ~mask);
+}
+
+/*** Prototypes ***/
+uint64_t ahci_alloc(AHCIQState *ahci, size_t bytes);
+void ahci_free(AHCIQState *ahci, uint64_t addr);
+QPCIDevice *get_ahci_device(uint32_t *fingerprint);
+void free_ahci_device(QPCIDevice *dev);
+void ahci_clean_mem(AHCIQState *ahci);
+void ahci_pci_enable(AHCIQState *ahci);
+void start_ahci_device(AHCIQState *ahci);
+void ahci_hba_enable(AHCIQState *ahci);
+unsigned ahci_port_select(AHCIQState *ahci);
+void ahci_port_clear(AHCIQState *ahci, uint8_t port);
+void ahci_port_check_error(AHCIQState *ahci, uint8_t port);
+void ahci_port_check_interrupts(AHCIQState *ahci, uint8_t port,
+ uint32_t intr_mask);
+void ahci_port_check_nonbusy(AHCIQState *ahci, uint8_t port, uint8_t slot);
+void ahci_port_check_d2h_sanity(AHCIQState *ahci, uint8_t port, uint8_t slot);
+void ahci_port_check_pio_sanity(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, size_t buffsize);
+void ahci_port_check_cmd_sanity(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, size_t buffsize);
+void ahci_get_command_header(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, AHCICommandHeader *cmd);
+void ahci_set_command_header(AHCIQState *ahci, uint8_t port,
+ uint8_t slot, AHCICommandHeader *cmd);
+void ahci_destroy_command(AHCIQState *ahci, uint8_t port, uint8_t slot);
+void ahci_write_fis(AHCIQState *ahci, RegH2DFIS *fis, uint64_t addr);
+unsigned ahci_pick_cmd(AHCIQState *ahci, uint8_t port);
+unsigned size_to_prdtl(unsigned bytes, unsigned bytes_per_prd);
+void ahci_guest_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd,
+ uint64_t gbuffer, size_t size);
+void ahci_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd,
+ void *buffer, size_t bufsize);
+
+/* Command Lifecycle */
+AHCICommand *ahci_command_create(uint8_t command_name);
+void ahci_command_commit(AHCIQState *ahci, AHCICommand *cmd, uint8_t port);
+void ahci_command_issue(AHCIQState *ahci, AHCICommand *cmd);
+void ahci_command_issue_async(AHCIQState *ahci, AHCICommand *cmd);
+void ahci_command_wait(AHCIQState *ahci, AHCICommand *cmd);
+void ahci_command_verify(AHCIQState *ahci, AHCICommand *cmd);
+void ahci_command_free(AHCICommand *cmd);
+
+/* Command adjustments */
+void ahci_command_set_buffer(AHCICommand *cmd, uint64_t buffer);
+void ahci_command_set_size(AHCICommand *cmd, uint64_t xbytes);
+void ahci_command_set_prd_size(AHCICommand *cmd, unsigned prd_size);
+void ahci_command_set_sizes(AHCICommand *cmd, uint64_t xbytes,
+ unsigned prd_size);
+
+/* Command Misc */
+uint8_t ahci_command_slot(AHCICommand *cmd);
+
+#endif
diff --git a/tests/libqos/libqos-pc.c b/tests/libqos/libqos-pc.c
new file mode 100644
index 0000000000..bbace893fb
--- /dev/null
+++ b/tests/libqos/libqos-pc.c
@@ -0,0 +1,24 @@
+#include "libqos/libqos-pc.h"
+#include "libqos/malloc-pc.h"
+
+static QOSOps qos_ops = {
+ .init_allocator = pc_alloc_init_flags,
+ .uninit_allocator = pc_alloc_uninit
+};
+
+QOSState *qtest_pc_boot(const char *cmdline_fmt, ...)
+{
+ QOSState *qs;
+ va_list ap;
+
+ va_start(ap, cmdline_fmt);
+ qs = qtest_vboot(&qos_ops, cmdline_fmt, ap);
+ va_end(ap);
+
+ return qs;
+}
+
+void qtest_pc_shutdown(QOSState *qs)
+{
+ return qtest_shutdown(qs);
+}
diff --git a/tests/libqos/libqos-pc.h b/tests/libqos/libqos-pc.h
new file mode 100644
index 0000000000..316857d32f
--- /dev/null
+++ b/tests/libqos/libqos-pc.h
@@ -0,0 +1,9 @@
+#ifndef __libqos_pc_h
+#define __libqos_pc_h
+
+#include "libqos/libqos.h"
+
+QOSState *qtest_pc_boot(const char *cmdline_fmt, ...);
+void qtest_pc_shutdown(QOSState *qs);
+
+#endif
diff --git a/tests/libqos/libqos.c b/tests/libqos/libqos.c
new file mode 100644
index 0000000000..bc8beb281f
--- /dev/null
+++ b/tests/libqos/libqos.c
@@ -0,0 +1,63 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <glib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+
+#include "libqtest.h"
+#include "libqos/libqos.h"
+#include "libqos/pci.h"
+
+/*** Test Setup & Teardown ***/
+
+/**
+ * Launch QEMU with the given command line,
+ * and then set up interrupts and our guest malloc interface.
+ */
+QOSState *qtest_vboot(QOSOps *ops, const char *cmdline_fmt, va_list ap)
+{
+ char *cmdline;
+
+ struct QOSState *qs = g_malloc(sizeof(QOSState));
+
+ cmdline = g_strdup_vprintf(cmdline_fmt, ap);
+ qs->qts = qtest_start(cmdline);
+ qs->ops = ops;
+ qtest_irq_intercept_in(global_qtest, "ioapic");
+ if (ops && ops->init_allocator) {
+ qs->alloc = ops->init_allocator(ALLOC_NO_FLAGS);
+ }
+
+ g_free(cmdline);
+ return qs;
+}
+
+/**
+ * Launch QEMU with the given command line,
+ * and then set up interrupts and our guest malloc interface.
+ */
+QOSState *qtest_boot(QOSOps *ops, const char *cmdline_fmt, ...)
+{
+ QOSState *qs;
+ va_list ap;
+
+ va_start(ap, cmdline_fmt);
+ qs = qtest_vboot(ops, cmdline_fmt, ap);
+ va_end(ap);
+
+ return qs;
+}
+
+/**
+ * Tear down the QEMU instance.
+ */
+void qtest_shutdown(QOSState *qs)
+{
+ if (qs->alloc && qs->ops && qs->ops->uninit_allocator) {
+ qs->ops->uninit_allocator(qs->alloc);
+ qs->alloc = NULL;
+ }
+ qtest_quit(qs->qts);
+ g_free(qs);
+}
diff --git a/tests/libqos/libqos.h b/tests/libqos/libqos.h
new file mode 100644
index 0000000000..612d41e5e9
--- /dev/null
+++ b/tests/libqos/libqos.h
@@ -0,0 +1,33 @@
+#ifndef __libqos_h
+#define __libqos_h
+
+#include "libqtest.h"
+#include "libqos/pci.h"
+#include "libqos/malloc-pc.h"
+
+typedef struct QOSOps {
+ QGuestAllocator *(*init_allocator)(QAllocOpts);
+ void (*uninit_allocator)(QGuestAllocator *);
+} QOSOps;
+
+typedef struct QOSState {
+ QTestState *qts;
+ QGuestAllocator *alloc;
+ QOSOps *ops;
+} QOSState;
+
+QOSState *qtest_vboot(QOSOps *ops, const char *cmdline_fmt, va_list ap);
+QOSState *qtest_boot(QOSOps *ops, const char *cmdline_fmt, ...);
+void qtest_shutdown(QOSState *qs);
+
+static inline uint64_t qmalloc(QOSState *q, size_t bytes)
+{
+ return guest_alloc(q->alloc, bytes);
+}
+
+static inline void qfree(QOSState *q, uint64_t addr)
+{
+ guest_free(q->alloc, addr);
+}
+
+#endif
diff --git a/tests/libqos/malloc-pc.c b/tests/libqos/malloc-pc.c
index c9c48fddc9..6e253b6877 100644
--- a/tests/libqos/malloc-pc.c
+++ b/tests/libqos/malloc-pc.c
@@ -32,31 +32,17 @@ void pc_alloc_uninit(QGuestAllocator *allocator)
QGuestAllocator *pc_alloc_init_flags(QAllocOpts flags)
{
- QGuestAllocator *s = g_malloc0(sizeof(*s));
+ QGuestAllocator *s;
uint64_t ram_size;
QFWCFG *fw_cfg = pc_fw_cfg_init();
- MemBlock *node;
-
- s->opts = flags;
- s->page_size = PAGE_SIZE;
ram_size = qfw_cfg_get_u64(fw_cfg, FW_CFG_RAM_SIZE);
-
- /* Start at 1MB */
- s->start = 1 << 20;
-
- /* Respect PCI hole */
- s->end = MIN(ram_size, 0xE0000000);
+ s = alloc_init_flags(flags, 1 << 20, MIN(ram_size, 0xE0000000));
+ alloc_set_page_size(s, PAGE_SIZE);
/* clean-up */
g_free(fw_cfg);
- QTAILQ_INIT(&s->used);
- QTAILQ_INIT(&s->free);
-
- node = mlist_new(s->start, s->end - s->start);
- QTAILQ_INSERT_HEAD(&s->free, node, MLIST_ENTNAME);
-
return s;
}
diff --git a/tests/libqos/malloc.c b/tests/libqos/malloc.c
index 5debf18497..67f31902fd 100644
--- a/tests/libqos/malloc.c
+++ b/tests/libqos/malloc.c
@@ -16,6 +16,26 @@
#include <inttypes.h>
#include <glib.h>
+typedef QTAILQ_HEAD(MemList, MemBlock) MemList;
+
+typedef struct MemBlock {
+ QTAILQ_ENTRY(MemBlock) MLIST_ENTNAME;
+ uint64_t size;
+ uint64_t addr;
+} MemBlock;
+
+struct QGuestAllocator {
+ QAllocOpts opts;
+ uint64_t start;
+ uint64_t end;
+ uint32_t page_size;
+
+ MemList used;
+ MemList free;
+};
+
+#define DEFAULT_PAGE_SIZE 4096
+
static void mlist_delete(MemList *list, MemBlock *node)
{
g_assert(list && node);
@@ -103,6 +123,21 @@ static void mlist_coalesce(MemList *head, MemBlock *node)
} while (merge);
}
+static MemBlock *mlist_new(uint64_t addr, uint64_t size)
+{
+ MemBlock *block;
+
+ if (!size) {
+ return NULL;
+ }
+ block = g_malloc0(sizeof(MemBlock));
+
+ block->addr = addr;
+ block->size = size;
+
+ return block;
+}
+
static uint64_t mlist_fulfill(QGuestAllocator *s, MemBlock *freenode,
uint64_t size)
{
@@ -187,21 +222,6 @@ static void mlist_free(QGuestAllocator *s, uint64_t addr)
mlist_coalesce(&s->free, node);
}
-MemBlock *mlist_new(uint64_t addr, uint64_t size)
-{
- MemBlock *block;
-
- if (!size) {
- return NULL;
- }
- block = g_malloc0(sizeof(MemBlock));
-
- block->addr = addr;
- block->size = size;
-
- return block;
-}
-
/*
* Mostly for valgrind happiness, but it does offer
* a chokepoint for debugging guest memory leaks, too.
@@ -268,3 +288,44 @@ void guest_free(QGuestAllocator *allocator, uint64_t addr)
mlist_check(allocator);
}
}
+
+QGuestAllocator *alloc_init(uint64_t start, uint64_t end)
+{
+ QGuestAllocator *s = g_malloc0(sizeof(*s));
+ MemBlock *node;
+
+ s->start = start;
+ s->end = end;
+
+ QTAILQ_INIT(&s->used);
+ QTAILQ_INIT(&s->free);
+
+ node = mlist_new(s->start, s->end - s->start);
+ QTAILQ_INSERT_HEAD(&s->free, node, MLIST_ENTNAME);
+
+ s->page_size = DEFAULT_PAGE_SIZE;
+
+ return s;
+}
+
+QGuestAllocator *alloc_init_flags(QAllocOpts opts,
+ uint64_t start, uint64_t end)
+{
+ QGuestAllocator *s = alloc_init(start, end);
+ s->opts = opts;
+ return s;
+}
+
+void alloc_set_page_size(QGuestAllocator *allocator, size_t page_size)
+{
+ /* Can't alter the page_size for an allocator in-use */
+ g_assert(QTAILQ_EMPTY(&allocator->used));
+
+ g_assert(is_power_of_2(page_size));
+ allocator->page_size = page_size;
+}
+
+void alloc_set_flags(QGuestAllocator *allocator, QAllocOpts opts)
+{
+ allocator->opts |= opts;
+}
diff --git a/tests/libqos/malloc.h b/tests/libqos/malloc.h
index 465efeb8fb..71ac407dcd 100644
--- a/tests/libqos/malloc.h
+++ b/tests/libqos/malloc.h
@@ -17,8 +17,6 @@
#include <sys/types.h>
#include "qemu/queue.h"
-#define MLIST_ENTNAME entries
-
typedef enum {
ALLOC_NO_FLAGS = 0x00,
ALLOC_LEAK_WARN = 0x01,
@@ -26,28 +24,18 @@ typedef enum {
ALLOC_PARANOID = 0x04
} QAllocOpts;
-typedef QTAILQ_HEAD(MemList, MemBlock) MemList;
-typedef struct MemBlock {
- QTAILQ_ENTRY(MemBlock) MLIST_ENTNAME;
- uint64_t size;
- uint64_t addr;
-} MemBlock;
-
-typedef struct QGuestAllocator {
- QAllocOpts opts;
- uint64_t start;
- uint64_t end;
- uint32_t page_size;
+typedef struct QGuestAllocator QGuestAllocator;
- MemList used;
- MemList free;
-} QGuestAllocator;
-
-MemBlock *mlist_new(uint64_t addr, uint64_t size);
void alloc_uninit(QGuestAllocator *allocator);
/* Always returns page aligned values */
uint64_t guest_alloc(QGuestAllocator *allocator, size_t size);
void guest_free(QGuestAllocator *allocator, uint64_t addr);
+QGuestAllocator *alloc_init(uint64_t start, uint64_t end);
+QGuestAllocator *alloc_init_flags(QAllocOpts flags,
+ uint64_t start, uint64_t end);
+void alloc_set_page_size(QGuestAllocator *allocator, size_t page_size);
+void alloc_set_flags(QGuestAllocator *allocator, QAllocOpts opts);
+
#endif
diff --git a/tests/qemu-iotests/016.out b/tests/qemu-iotests/016.out
deleted file mode 100644
index acbd60b4a3..0000000000
--- a/tests/qemu-iotests/016.out
+++ /dev/null
@@ -1,23 +0,0 @@
-QA output created by 016
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-
-== reading at EOF ==
-read 512/512 bytes at offset 134217728
-512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== reading far past EOF ==
-read 512/512 bytes at offset 268435456
-512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== writing at EOF ==
-wrote 512/512 bytes at offset 134217728
-512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 512/512 bytes at offset 134217728
-512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== writing far past EOF ==
-wrote 512/512 bytes at offset 268435456
-512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 512/512 bytes at offset 268435456
-512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-*** done
diff --git a/tests/qemu-iotests/051 b/tests/qemu-iotests/051
index 11c858f27d..27138a2299 100755
--- a/tests/qemu-iotests/051
+++ b/tests/qemu-iotests/051
@@ -93,6 +93,7 @@ echo
run_qemu -drive file="$TEST_IMG",format=foo
run_qemu -drive file="$TEST_IMG",driver=foo
run_qemu -drive file="$TEST_IMG",driver=raw,format=qcow2
+run_qemu -drive file="$TEST_IMG",driver=qcow2,format=qcow2
echo
echo === Overriding backing file ===
diff --git a/tests/qemu-iotests/051.out b/tests/qemu-iotests/051.out
index f497c5717b..bf52bf02d4 100644
--- a/tests/qemu-iotests/051.out
+++ b/tests/qemu-iotests/051.out
@@ -5,43 +5,46 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file='TEST_DIR
=== Unknown option ===
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=: could not open disk image TEST_DIR/t.qcow2: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=on
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=on: could not open disk image TEST_DIR/t.qcow2: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=on: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=1234
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=1234: could not open disk image TEST_DIR/t.qcow2: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=1234: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=foo
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=foo: could not open disk image TEST_DIR/t.qcow2: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,unknown_opt=foo: Block format 'qcow2' used by device 'ide0-hd0' doesn't support the option 'unknown_opt'
=== Unknown protocol option ===
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=: could not open disk image TEST_DIR/t.qcow2: Block protocol 'file' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=: Block protocol 'file' doesn't support the option 'unknown_opt'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=on
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=on: could not open disk image TEST_DIR/t.qcow2: Block protocol 'file' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=on: Block protocol 'file' doesn't support the option 'unknown_opt'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=1234
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=1234: could not open disk image TEST_DIR/t.qcow2: Block protocol 'file' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=1234: Block protocol 'file' doesn't support the option 'unknown_opt'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=foo
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=foo: could not open disk image TEST_DIR/t.qcow2: Block protocol 'file' doesn't support the option 'unknown_opt'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,file.unknown_opt=foo: Block protocol 'file' doesn't support the option 'unknown_opt'
=== Invalid format ===
Testing: -drive file=TEST_DIR/t.qcow2,format=foo
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=foo: 'foo' invalid format
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=foo: Unknown driver 'foo'
Testing: -drive file=TEST_DIR/t.qcow2,driver=foo
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=foo: could not open disk image TEST_DIR/t.qcow2: Unknown driver 'foo'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=foo: Unknown driver 'foo'
Testing: -drive file=TEST_DIR/t.qcow2,driver=raw,format=qcow2
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=raw,format=qcow2: could not open disk image TEST_DIR/t.qcow2: Driver specified twice
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=raw,format=qcow2: Cannot specify both 'driver' and 'format'
+
+Testing: -drive file=TEST_DIR/t.qcow2,driver=qcow2,format=qcow2
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=qcow2,format=qcow2: Cannot specify both 'driver' and 'format'
=== Overriding backing file ===
@@ -55,13 +58,13 @@ ide0-hd0: TEST_DIR/t.qcow2 (qcow2)
(qemu) qququiquit
Testing: -drive file=TEST_DIR/t.qcow2,driver=raw,backing.file.filename=TEST_DIR/t.qcow2.orig
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=raw,backing.file.filename=TEST_DIR/t.qcow2.orig: could not open disk image TEST_DIR/t.qcow2: Driver doesn't support backing files
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=raw,backing.file.filename=TEST_DIR/t.qcow2.orig: Driver doesn't support backing files
Testing: -drive file=TEST_DIR/t.qcow2,file.backing.driver=file,file.backing.filename=TEST_DIR/t.qcow2.orig
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,file.backing.driver=file,file.backing.filename=TEST_DIR/t.qcow2.orig: could not open disk image TEST_DIR/t.qcow2: Driver doesn't support backing files
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,file.backing.driver=file,file.backing.filename=TEST_DIR/t.qcow2.orig: Driver doesn't support backing files
Testing: -drive file=TEST_DIR/t.qcow2,file.backing.driver=qcow2,file.backing.file.filename=TEST_DIR/t.qcow2.orig
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,file.backing.driver=qcow2,file.backing.file.filename=TEST_DIR/t.qcow2.orig: could not open disk image TEST_DIR/t.qcow2: Driver doesn't support backing files
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,file.backing.driver=qcow2,file.backing.file.filename=TEST_DIR/t.qcow2.orig: Driver doesn't support backing files
=== Enable and disable lazy refcounting on the command line, plus some invalid values ===
@@ -75,20 +78,20 @@ QEMU X.Y.Z monitor - type 'help' for more information
(qemu) qququiquit
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=: could not open disk image TEST_DIR/t.qcow2: Parameter 'lazy-refcounts' expects 'on' or 'off'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=: Parameter 'lazy-refcounts' expects 'on' or 'off'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=42
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=42: could not open disk image TEST_DIR/t.qcow2: Parameter 'lazy-refcounts' expects 'on' or 'off'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=42: Parameter 'lazy-refcounts' expects 'on' or 'off'
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=foo
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=foo: could not open disk image TEST_DIR/t.qcow2: Parameter 'lazy-refcounts' expects 'on' or 'off'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=foo: Parameter 'lazy-refcounts' expects 'on' or 'off'
=== With version 2 images enabling lazy refcounts must fail ===
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=on
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=on: could not open disk image TEST_DIR/t.qcow2: Lazy refcounts require a qcow2 image with at least qemu 1.1 compatibility level
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=on: Lazy refcounts require a qcow2 image with at least qemu 1.1 compatibility level
Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,lazy-refcounts=off
QEMU X.Y.Z monitor - type 'help' for more information
@@ -248,31 +251,31 @@ QEMU X.Y.Z monitor - type 'help' for more information
(qemu) qququiquit
Testing: -drive file=TEST_DIR/t.qcow2,file.driver=qcow2
-QEMU_PROG: -drive file=TEST_DIR/t.qcow2,file.driver=qcow2: could not open disk image TEST_DIR/t.qcow2: Block format 'qcow2' used by device '' doesn't support the option 'filename'
+QEMU_PROG: -drive file=TEST_DIR/t.qcow2,file.driver=qcow2: Block format 'qcow2' used by device '' doesn't support the option 'filename'
=== Leaving out required options ===
Testing: -drive driver=file
-QEMU_PROG: -drive driver=file: could not open disk image ide0-hd0: The 'file' block driver requires a file name
+QEMU_PROG: -drive driver=file: The 'file' block driver requires a file name
Testing: -drive driver=nbd
-QEMU_PROG: -drive driver=nbd: could not open disk image ide0-hd0: one of path and host must be specified.
+QEMU_PROG: -drive driver=nbd: one of path and host must be specified.
Testing: -drive driver=raw
-QEMU_PROG: -drive driver=raw: could not open disk image ide0-hd0: Can't use 'raw' as a block driver for the protocol level
+QEMU_PROG: -drive driver=raw: Can't use 'raw' as a block driver for the protocol level
Testing: -drive file.driver=file
-QEMU_PROG: -drive file.driver=file: could not open disk image ide0-hd0: The 'file' block driver requires a file name
+QEMU_PROG: -drive file.driver=file: The 'file' block driver requires a file name
Testing: -drive file.driver=nbd
-QEMU_PROG: -drive file.driver=nbd: could not open disk image ide0-hd0: one of path and host must be specified.
+QEMU_PROG: -drive file.driver=nbd: one of path and host must be specified.
Testing: -drive file.driver=raw
-QEMU_PROG: -drive file.driver=raw: could not open disk image ide0-hd0: Can't use 'raw' as a block driver for the protocol level
+QEMU_PROG: -drive file.driver=raw: Can't use 'raw' as a block driver for the protocol level
Testing: -drive foo=bar
-QEMU_PROG: -drive foo=bar: could not open disk image ide0-hd0: Must specify either driver or file
+QEMU_PROG: -drive foo=bar: Must specify either driver or file
=== Specifying both an option and its legacy alias ===
@@ -323,13 +326,13 @@ QEMU_PROG: -drive file=TEST_DIR/t.qcow2,readonly=on,read-only=off: 'read-only' a
=== Parsing protocol from file name ===
Testing: -hda foo:bar
-QEMU_PROG: -hda foo:bar: could not open disk image foo:bar: Unknown protocol
+QEMU_PROG: -hda foo:bar: Unknown protocol 'foo'
Testing: -drive file=foo:bar
-QEMU_PROG: -drive file=foo:bar: could not open disk image foo:bar: Unknown protocol
+QEMU_PROG: -drive file=foo:bar: Unknown protocol 'foo'
Testing: -drive file.filename=foo:bar
-QEMU_PROG: -drive file.filename=foo:bar: could not open disk image ide0-hd0: Could not open 'foo:bar': No such file or directory
+QEMU_PROG: -drive file.filename=foo:bar: Could not open 'foo:bar': No such file or directory
Testing: -hda file:TEST_DIR/t.qcow2
QEMU X.Y.Z monitor - type 'help' for more information
@@ -340,7 +343,7 @@ QEMU X.Y.Z monitor - type 'help' for more information
(qemu) qququiquit
Testing: -drive file.filename=file:TEST_DIR/t.qcow2
-QEMU_PROG: -drive file.filename=file:TEST_DIR/t.qcow2: could not open disk image ide0-hd0: Could not open 'file:TEST_DIR/t.qcow2': No such file or directory
+QEMU_PROG: -drive file.filename=file:TEST_DIR/t.qcow2: Could not open 'file:TEST_DIR/t.qcow2': No such file or directory
=== Snapshot mode ===
diff --git a/tests/qemu-iotests/067.out b/tests/qemu-iotests/067.out
index 13ff3cd7aa..00b3eaefc7 100644
--- a/tests/qemu-iotests/067.out
+++ b/tests/qemu-iotests/067.out
@@ -43,6 +43,7 @@ Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,if=none,id=disk -device virti
"drv": "qcow2",
"iops": 0,
"bps_wr": 0,
+ "write_threshold": 0,
"encrypted": false,
"bps": 0,
"bps_rd": 0,
@@ -218,6 +219,7 @@ Testing: -drive file=TEST_DIR/t.qcow2,format=qcow2,if=none,id=disk
"drv": "qcow2",
"iops": 0,
"bps_wr": 0,
+ "write_threshold": 0,
"encrypted": false,
"bps": 0,
"bps_rd": 0,
@@ -423,6 +425,7 @@ Testing:
"drv": "qcow2",
"iops": 0,
"bps_wr": 0,
+ "write_threshold": 0,
"encrypted": false,
"bps": 0,
"bps_rd": 0,
@@ -607,6 +610,7 @@ Testing:
"drv": "qcow2",
"iops": 0,
"bps_wr": 0,
+ "write_threshold": 0,
"encrypted": false,
"bps": 0,
"bps_rd": 0,
@@ -717,6 +721,7 @@ Testing:
"drv": "qcow2",
"iops": 0,
"bps_wr": 0,
+ "write_threshold": 0,
"encrypted": false,
"bps": 0,
"bps_rd": 0,
diff --git a/tests/qemu-iotests/083 b/tests/qemu-iotests/083
index 991a9d91db..1b2d3f1156 100755
--- a/tests/qemu-iotests/083
+++ b/tests/qemu-iotests/083
@@ -56,7 +56,8 @@ filter_nbd() {
#
# Filter out the TCP port number since this changes between runs.
sed -e 's#^.*nbd\.c:.*##g' \
- -e 's#nbd:127\.0\.0\.1:[^:]*:#nbd:127\.0\.0\.1:PORT:#g'
+ -e 's#nbd:127\.0\.0\.1:[^:]*:#nbd:127\.0\.0\.1:PORT:#g' \
+ -e 's#\(exportname=foo\|PORT\): Failed to .*$#\1#'
}
check_disconnect() {
diff --git a/tests/qemu-iotests/083.out b/tests/qemu-iotests/083.out
index 85ee8d6dd7..8c1441bf4f 100644
--- a/tests/qemu-iotests/083.out
+++ b/tests/qemu-iotests/083.out
@@ -1,163 +1,138 @@
QA output created by 083
=== Check disconnect before neg1 ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect after neg1 ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect 8 neg1 ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect 16 neg1 ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect before export ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect after export ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect 4 export ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect 12 export ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect 16 export ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect before neg2 ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect after neg2 ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect 8 neg2 ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect 10 neg2 ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo
no file open, try 'help open'
=== Check disconnect before request ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect after request ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect before reply ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect after reply ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect 4 reply ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect 8 reply ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect before data ===
-qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
=== Check disconnect after data ===
-read failed: Input/output error
+read 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== Check disconnect before neg-classic ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT
no file open, try 'help open'
=== Check disconnect 8 neg-classic ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT
no file open, try 'help open'
=== Check disconnect 16 neg-classic ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT
no file open, try 'help open'
=== Check disconnect 24 neg-classic ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT
no file open, try 'help open'
=== Check disconnect 28 neg-classic ===
-
-qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+qemu-io: can't open device nbd:127.0.0.1:PORT
no file open, try 'help open'
=== Check disconnect after neg-classic ===
-qemu-io: can't open device nbd:127.0.0.1:PORT: Could not read image for determining its format: Input/output error
-no file open, try 'help open'
+read failed: Input/output error
*** done
diff --git a/tests/qemu-iotests/087.out b/tests/qemu-iotests/087.out
index 91f4ea1a8b..0ba2e43b40 100644
--- a/tests/qemu-iotests/087.out
+++ b/tests/qemu-iotests/087.out
@@ -21,9 +21,9 @@ QMP_VERSION
{"return": {}}
{"error": {"class": "GenericError", "desc": "Device with id 'disk' already exists"}}
{"error": {"class": "GenericError", "desc": "Device name 'test-node' conflicts with an existing node name"}}
-{"error": {"class": "GenericError", "desc": "could not open disk image disk2: node-name=disk is conflicting with a device id"}}
-{"error": {"class": "GenericError", "desc": "could not open disk image disk2: Duplicate node name"}}
-{"error": {"class": "GenericError", "desc": "could not open disk image disk3: node-name=disk3 is conflicting with a device id"}}
+{"error": {"class": "GenericError", "desc": "node-name=disk is conflicting with a device id"}}
+{"error": {"class": "GenericError", "desc": "Duplicate node name"}}
+{"error": {"class": "GenericError", "desc": "node-name=disk3 is conflicting with a device id"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN"}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "ide1-cd0", "tray-open": true}}
@@ -57,7 +57,7 @@ QMP_VERSION
Testing:
QMP_VERSION
{"return": {}}
-{"error": {"class": "GenericError", "desc": "could not open disk image disk: Guest must be stopped for opening of encrypted image"}}
+{"error": {"class": "GenericError", "desc": "Guest must be stopped for opening of encrypted image"}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN"}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "ide1-cd0", "tray-open": true}}
diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
new file mode 100755
index 0000000000..b9096a55d4
--- /dev/null
+++ b/tests/qemu-iotests/093
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+#
+# Tests for IO throttling
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import iotests
+
+class ThrottleTestCase(iotests.QMPTestCase):
+ test_img = "null-aio://"
+
+ def blockstats(self, device):
+ result = self.vm.qmp("query-blockstats")
+ for r in result['return']:
+ if r['device'] == device:
+ stat = r['stats']
+ return stat['rd_bytes'], stat['rd_operations'], stat['wr_bytes'], stat['wr_operations']
+ raise Exception("Device not found for blockstats: %s" % device)
+
+ def setUp(self):
+ self.vm = iotests.VM().add_drive(self.test_img)
+ self.vm.launch()
+
+ def tearDown(self):
+ self.vm.shutdown()
+
+ def do_test_throttle(self, seconds, params):
+ def check_limit(limit, num):
+ # The IO throttling algorithm is discrete, so allow 10% error to make
+ # the test more robust
+ return limit == 0 or \
+ (num < seconds * limit * 1.1
+ and num > seconds * limit * 0.9)
+
+ nsec_per_sec = 1000000000
+
+ params['device'] = 'drive0'
+
+ result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
+ self.assert_qmp(result, 'return', {})
+
+ # Set vm clock to a known value
+ ns = seconds * nsec_per_sec
+ self.vm.qtest("clock_step %d" % ns)
+
+ # Submit enough requests. They will drain bps_max and iops_max, but the
+ # remaining requests won't be executed until we advance the virtual clock
+ # with the qtest interface
+ rq_size = 512
+ rd_nr = max(params['bps'] / rq_size / 2,
+ params['bps_rd'] / rq_size,
+ params['iops'] / 2,
+ params['iops_rd'])
+ rd_nr *= seconds * 2
+ wr_nr = max(params['bps'] / rq_size / 2,
+ params['bps_wr'] / rq_size,
+ params['iops'] / 2,
+ params['iops_wr'])
+ wr_nr *= seconds * 2
+ for i in range(rd_nr):
+ self.vm.hmp_qemu_io("drive0", "aio_read %d %d" % (i * rq_size, rq_size))
+ for i in range(wr_nr):
+ self.vm.hmp_qemu_io("drive0", "aio_write %d %d" % (i * rq_size, rq_size))
+
+ start_rd_bytes, start_rd_iops, start_wr_bytes, start_wr_iops = self.blockstats('drive0')
+
+ self.vm.qtest("clock_step %d" % ns)
+ end_rd_bytes, end_rd_iops, end_wr_bytes, end_wr_iops = self.blockstats('drive0')
+
+ rd_bytes = end_rd_bytes - start_rd_bytes
+ rd_iops = end_rd_iops - start_rd_iops
+ wr_bytes = end_wr_bytes - start_wr_bytes
+ wr_iops = end_wr_iops - start_wr_iops
+
+ self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
+ self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
+ self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
+ self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
+ self.assertTrue(check_limit(params['iops_rd'], rd_iops))
+ self.assertTrue(check_limit(params['iops_wr'], wr_iops))
+
+ def test_all(self):
+ params = {"bps": 4096,
+ "bps_rd": 4096,
+ "bps_wr": 4096,
+ "iops": 10,
+ "iops_rd": 10,
+ "iops_wr": 10,
+ }
+ # Pick each out of all possible params and test
+ for tk in params:
+ limits = dict([(k, 0) for k in params])
+ limits[tk] = params[tk]
+ self.do_test_throttle(5, limits)
+
+class ThrottleTestCoroutine(ThrottleTestCase):
+ test_img = "null-co://"
+
+if __name__ == '__main__':
+ iotests.main(supported_fmts=["raw"])
diff --git a/tests/qemu-iotests/093.out b/tests/qemu-iotests/093.out
new file mode 100644
index 0000000000..fbc63e62f8
--- /dev/null
+++ b/tests/qemu-iotests/093.out
@@ -0,0 +1,5 @@
+..
+----------------------------------------------------------------------
+Ran 2 tests
+
+OK
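The new 093 test passes only if the blockstats deltas stay within 10% of seconds * limit once the virtual clock has been advanced. The following is a minimal standalone sketch of that tolerance check, with illustrative numbers; the 5-second window mirrors the test above, and the sample values are not taken from an actual run.

def check_limit(limit, num, seconds=5):
    # limit == 0 means the corresponding throttle is unset, so any
    # observed count passes; otherwise the count must stay within
    # +/-10% of seconds * limit, since the throttling algorithm is
    # discrete.
    return limit == 0 or \
        (seconds * limit * 0.9 < num < seconds * limit * 1.1)

# bps=4096 over a 5 second window allows roughly 18432..22528 bytes.
assert check_limit(4096, 20480)
assert not check_limit(4096, 40960)
assert check_limit(0, 123456)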
diff --git a/tests/qemu-iotests/094 b/tests/qemu-iotests/094
new file mode 100755
index 0000000000..27a2be2569
--- /dev/null
+++ b/tests/qemu-iotests/094
@@ -0,0 +1,81 @@
+#!/bin/bash
+#
+# Test case for drive-mirror to NBD (especially bdrv_swap() on NBD BDS)
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=mreitz@redhat.com
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+here="$PWD"
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+trap "exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+. ./common.qemu
+
+_supported_fmt generic
+_supported_proto nbd
+_supported_os Linux
+_unsupported_imgopts "subformat=monolithicFlat" "subformat=twoGbMaxExtentFlat"
+
+_make_test_img 64M
+$QEMU_IMG create -f $IMGFMT "$TEST_DIR/source.$IMGFMT" 64M | _filter_img_create
+
+_launch_qemu -drive if=none,id=src,file="$TEST_DIR/source.$IMGFMT",format=raw \
+ -nodefaults
+
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'qmp_capabilities'}" \
+ 'return'
+
+# 'format': 'nbd' is not actually "correct", but this is probably the only way
+# to test bdrv_swap() on an NBD BDS
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'drive-mirror',
+ 'arguments': {'device': 'src',
+ 'target': '$TEST_IMG',
+ 'format': 'nbd',
+ 'sync':'full',
+ 'mode':'existing'}}" \
+ 'BLOCK_JOB_READY'
+
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'block-job-complete',
+ 'arguments': {'device': 'src'}}" \
+ 'BLOCK_JOB_COMPLETE'
+
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'quit'}" \
+ 'return'
+
+wait=1 _cleanup_qemu
+
+_cleanup_test_img
+rm -f "$TEST_DIR/source.$IMGFMT"
+
+# success, all done
+echo '*** done'
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/094.out b/tests/qemu-iotests/094.out
new file mode 100644
index 0000000000..b66dc0787d
--- /dev/null
+++ b/tests/qemu-iotests/094.out
@@ -0,0 +1,11 @@
+QA output created by 094
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+Formatting 'TEST_DIR/source.IMGFMT', fmt=IMGFMT size=67108864
+{"return": {}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN"}
+*** done
diff --git a/tests/qemu-iotests/100 b/tests/qemu-iotests/100
index 9124aba760..7c1b235b51 100755
--- a/tests/qemu-iotests/100
+++ b/tests/qemu-iotests/100
@@ -55,6 +55,8 @@ echo "== verify pattern =="
$QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
+_cleanup_test_img
+
echo
echo "== Sequential requests =="
_make_test_img $size
@@ -66,6 +68,8 @@ $QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0xce 4k 4k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0 8k 4k" "$TEST_IMG" | _filter_qemu_io
+_cleanup_test_img
+
echo
echo "== Superset overlapping requests =="
_make_test_img $size
@@ -79,6 +83,8 @@ $QEMU_IO -c "read -P 0xcd 0 1k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0xcd 3k 1k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
+_cleanup_test_img
+
echo
echo "== Subset overlapping requests =="
_make_test_img $size
@@ -92,6 +98,8 @@ $QEMU_IO -c "read -P 0xce 0 1k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0xce 3k 1k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
+_cleanup_test_img
+
echo
echo "== Head overlapping requests =="
_make_test_img $size
@@ -104,6 +112,8 @@ echo "== verify pattern =="
$QEMU_IO -c "read -P 0xce 2k 2k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
+_cleanup_test_img
+
echo
echo "== Tail overlapping requests =="
_make_test_img $size
@@ -116,6 +126,8 @@ echo "== verify pattern =="
$QEMU_IO -c "read -P 0xce 0k 2k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
+_cleanup_test_img
+
echo
echo "== Disjoint requests =="
_make_test_img $size
diff --git a/tests/qemu-iotests/104 b/tests/qemu-iotests/104
index b471aa5bb1..f32752bb6f 100755
--- a/tests/qemu-iotests/104
+++ b/tests/qemu-iotests/104
@@ -28,11 +28,7 @@ here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
-_cleanup()
-{
- _cleanup_test_img
-}
-trap "_cleanup; exit \$status" 0 1 2 3 15
+trap "exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
@@ -47,8 +43,9 @@ echo
image_sizes="1024 1234"
for s in $image_sizes; do
- _make_test_img $s | _filter_img_create
+ _make_test_img $s
_img_info | _filter_img_info
+ _cleanup_test_img
done
# success, all done
diff --git a/tests/qemu-iotests/116 b/tests/qemu-iotests/116
new file mode 100755
index 0000000000..713ed484ba
--- /dev/null
+++ b/tests/qemu-iotests/116
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# Test error code paths for invalid QED images
+#
+# The aim of this test is to exercise the error paths in qed_open() to ensure
+# there are no crashes with invalid input files.
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=stefanha@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt qed
+_supported_proto generic
+_supported_os Linux
+
+
+size=128M
+
+echo
+echo "== truncated header cluster =="
+_make_test_img $size
+truncate -s 512 "$TEST_IMG"
+$QEMU_IO -f "$IMGFMT" -c "read 0 $size" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== invalid header magic =="
+_make_test_img $size
+poke_file "$TEST_IMG" "0" "QEDX"
+$QEMU_IO -f "$IMGFMT" -c "read 0 $size" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== invalid cluster size =="
+_make_test_img $size
+poke_file "$TEST_IMG" "4" "\xff\xff\xff\xff"
+$QEMU_IO -f "$IMGFMT" -c "read 0 $size" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== invalid table size =="
+_make_test_img $size
+poke_file "$TEST_IMG" "8" "\xff\xff\xff\xff"
+$QEMU_IO -f "$IMGFMT" -c "read 0 $size" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== invalid header size =="
+_make_test_img $size
+poke_file "$TEST_IMG" "12" "\xff\xff\xff\xff"
+$QEMU_IO -f "$IMGFMT" -c "read 0 $size" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== invalid L1 table offset =="
+_make_test_img $size
+poke_file "$TEST_IMG" "40" "\xff\xff\xff\xff\xff\xff\xff\xff"
+$QEMU_IO -f "$IMGFMT" -c "read 0 $size" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== invalid image size =="
+_make_test_img $size
+poke_file "$TEST_IMG" "48" "\xff\xff\xff\xff\xff\xff\xff\xff"
+$QEMU_IO -f "$IMGFMT" -c "read 0 $size" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/116.out b/tests/qemu-iotests/116.out
new file mode 100644
index 0000000000..b679ceea63
--- /dev/null
+++ b/tests/qemu-iotests/116.out
@@ -0,0 +1,37 @@
+QA output created by 116
+
+== truncated header cluster ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io: can't open device TEST_DIR/t.qed: Could not open 'TEST_DIR/t.qed': Invalid argument
+no file open, try 'help open'
+
+== invalid header magic ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io: can't open device TEST_DIR/t.qed: Image not in QED format
+no file open, try 'help open'
+
+== invalid cluster size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io: can't open device TEST_DIR/t.qed: Could not open 'TEST_DIR/t.qed': Invalid argument
+no file open, try 'help open'
+
+== invalid table size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io: can't open device TEST_DIR/t.qed: Could not open 'TEST_DIR/t.qed': Invalid argument
+no file open, try 'help open'
+
+== invalid header size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io: can't open device TEST_DIR/t.qed: Could not open 'TEST_DIR/t.qed': Invalid argument
+no file open, try 'help open'
+
+== invalid L1 table offset ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io: can't open device TEST_DIR/t.qed: Could not open 'TEST_DIR/t.qed': Invalid argument
+no file open, try 'help open'
+
+== invalid image size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+qemu-io: can't open device TEST_DIR/t.qed: Could not open 'TEST_DIR/t.qed': Invalid argument
+no file open, try 'help open'
+*** done
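Test 116 corrupts the QED header by poking fixed offsets: magic at 0, cluster_size at 4, table_size at 8, header_size at 12, l1_table_offset at 40 and image_size at 48. A minimal sketch of reading those fields back with Python's struct module follows; the field names and the little-endian layout are assumptions inferred from the offsets the test uses, not quoted from the QED specification.

import struct

def read_qed_header_fields(path):
    # Offsets match the bytes poked by test 116: magic at 0,
    # cluster_size at 4, table_size at 8, header_size at 12,
    # l1_table_offset at 40, image_size at 48.
    with open(path, 'rb') as f:
        head = f.read(56)
    magic, cluster_size, table_size, header_size = \
        struct.unpack_from('<4sIII', head, 0)
    l1_table_offset, image_size = struct.unpack_from('<QQ', head, 40)
    return {'magic': magic, 'cluster_size': cluster_size,
            'table_size': table_size, 'header_size': header_size,
            'l1_table_offset': l1_table_offset, 'image_size': image_size}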
diff --git a/tests/qemu-iotests/016 b/tests/qemu-iotests/123
index 52397aa80e..ad608035d1 100755
--- a/tests/qemu-iotests/016
+++ b/tests/qemu-iotests/123
@@ -1,8 +1,8 @@
#!/bin/bash
#
-# Test I/O after EOF for growable images.
+# Test case for qemu-img convert to NBD
#
-# Copyright (C) 2009 Red Hat, Inc.
+# Copyright (C) 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,18 +19,19 @@
#
# creator
-owner=hch@lst.de
+owner=mreitz@redhat.com
-seq=`basename $0`
+seq="$(basename $0)"
echo "QA output created by $seq"
-here=`pwd`
+here="$PWD"
tmp=/tmp/$$
status=1 # failure is the default!
_cleanup()
{
- _cleanup_test_img
+ _cleanup_test_img
+ rm -f "$SRC_IMG"
}
trap "_cleanup; exit \$status" 0 1 2 3 15
@@ -39,35 +40,23 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
. ./common.filter
_supported_fmt raw
-_supported_proto file sheepdog nfs
+_supported_proto nbd
_supported_os Linux
+SRC_IMG="$TEST_DIR/source.$IMGFMT"
-# No -f, use probing for the protocol driver
-QEMU_IO_PROTO="$QEMU_IO_PROG -g --cache $CACHEMODE"
+_make_test_img 1M
+$QEMU_IMG create -f $IMGFMT "$SRC_IMG" 1M | _filter_img_create
-size=128M
-_make_test_img $size
+$QEMU_IO -c 'write -P 42 0 1M' "$SRC_IMG" | _filter_qemu_io
-echo
-echo "== reading at EOF =="
-$QEMU_IO_PROTO -c "read -P 0 $size 512" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== reading far past EOF =="
-$QEMU_IO_PROTO -c "read -P 0 256M 512" "$TEST_IMG" | _filter_qemu_io
+$QEMU_IMG convert -n -f $IMGFMT -O raw "$SRC_IMG" "$TEST_IMG"
-echo
-echo "== writing at EOF =="
-$QEMU_IO_PROTO -c "write -P 66 $size 512" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 66 $size 512" "$TEST_IMG" | _filter_qemu_io
+$QEMU_IO -c 'read -P 42 0 1M' "$TEST_IMG" | _filter_qemu_io
-echo
-echo "== writing far past EOF =="
-$QEMU_IO_PROTO -c "write -P 66 256M 512" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 66 256M 512" "$TEST_IMG" | _filter_qemu_io
# success, all done
-echo "*** done"
+echo
+echo '*** done'
rm -f $seq.full
status=0
diff --git a/tests/qemu-iotests/123.out b/tests/qemu-iotests/123.out
new file mode 100644
index 0000000000..0b818d34c4
--- /dev/null
+++ b/tests/qemu-iotests/123.out
@@ -0,0 +1,9 @@
+QA output created by 123
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576
+Formatting 'TEST_DIR/source.IMGFMT', fmt=IMGFMT size=1048576
+wrote 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+*** done
diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter
index b73c70be95..06e1bb045f 100644
--- a/tests/qemu-iotests/common.filter
+++ b/tests/qemu-iotests/common.filter
@@ -200,6 +200,7 @@ _filter_img_info()
sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
-e "s#$TEST_DIR#TEST_DIR#g" \
-e "s#$IMGFMT#IMGFMT#g" \
+ -e 's#nbd://127.0.0.1:10810$#TEST_DIR/t.IMGFMT#g' \
-e "/encrypted: yes/d" \
-e "/cluster_size: [0-9]\\+/d" \
-e "/table_size: [0-9]\\+/d" \
diff --git a/tests/qemu-iotests/common.qemu b/tests/qemu-iotests/common.qemu
index 8e618b5149..4e1996c3ec 100644
--- a/tests/qemu-iotests/common.qemu
+++ b/tests/qemu-iotests/common.qemu
@@ -187,13 +187,23 @@ function _launch_qemu()
# Silently kills the QEMU process
+#
+# If $wait is set to anything other than the empty string, the process will not
+# be killed but only waited for, and any output will be forwarded to stdout. If
+# $wait is empty, the process will be killed and all output will be suppressed.
function _cleanup_qemu()
{
# QEMU_PID[], QEMU_IN[], QEMU_OUT[] all use same indices
for i in "${!QEMU_OUT[@]}"
do
- kill -KILL ${QEMU_PID[$i]} 2>/dev/null
+ if [ -z "${wait}" ]; then
+ kill -KILL ${QEMU_PID[$i]} 2>/dev/null
+ fi
wait ${QEMU_PID[$i]} 2>/dev/null # silent kill
+ if [ -n "${wait}" ]; then
+ cat <&${QEMU_OUT[$i]} | _filter_testdir | _filter_qemu \
+ | _filter_qemu_io | _filter_qmp
+ fi
rm -f "${QEMU_FIFO_IN}_${i}" "${QEMU_FIFO_OUT}_${i}"
eval "exec ${QEMU_IN[$i]}<&-" # close file descriptors
eval "exec ${QEMU_OUT[$i]}<&-"
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
index aa093d9d84..22d3514041 100644
--- a/tests/qemu-iotests/common.rc
+++ b/tests/qemu-iotests/common.rc
@@ -153,7 +153,7 @@ _make_test_img()
# Start an NBD server on the image file, which is what we'll be talking to
if [ $IMGPROTO = "nbd" ]; then
- eval "$QEMU_NBD -v -t -b 127.0.0.1 -p 10810 $TEST_IMG_FILE &"
+ eval "$QEMU_NBD -v -t -b 127.0.0.1 -p 10810 -f $IMGFMT $TEST_IMG_FILE &"
QEMU_NBD_PID=$!
sleep 1 # FIXME: qemu-nbd needs to be listening before we continue
fi
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index f8bf354156..0d3b95c258 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -22,7 +22,7 @@
013 rw auto
014 rw auto
015 rw snapshot auto
-016 rw auto quick
+# 016 was removed, do not reuse
017 rw backing auto quick
018 rw backing auto quick
019 rw backing auto quick
@@ -99,6 +99,8 @@
090 rw auto quick
091 rw auto
092 rw auto quick
+093 auto
+094 rw auto quick
095 rw auto quick
097 rw auto backing
098 rw auto backing quick
@@ -116,3 +118,5 @@
111 rw auto quick
113 rw auto quick
114 rw auto quick
+116 rw auto quick
+123 rw auto quick
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 87002e0e2c..14028540b3 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -21,8 +21,11 @@ import re
import subprocess
import string
import unittest
-import sys; sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'scripts', 'qmp'))
+import sys
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'scripts'))
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'scripts', 'qmp'))
import qmp
+import qtest
import struct
__all__ = ['imgfmt', 'imgproto', 'test_dir' 'qemu_img', 'qemu_io',
@@ -81,10 +84,12 @@ class VM(object):
def __init__(self):
self._monitor_path = os.path.join(test_dir, 'qemu-mon.%d' % os.getpid())
self._qemu_log_path = os.path.join(test_dir, 'qemu-log.%d' % os.getpid())
+ self._qtest_path = os.path.join(test_dir, 'qemu-qtest.%d' % os.getpid())
self._args = qemu_args + ['-chardev',
'socket,id=mon,path=' + self._monitor_path,
'-mon', 'chardev=mon,mode=control',
- '-qtest', 'stdio', '-machine', 'accel=qtest',
+ '-qtest', 'unix:path=' + self._qtest_path,
+ '-machine', 'accel=qtest',
'-display', 'none', '-vga', 'none']
self._num_drives = 0
@@ -160,9 +165,11 @@ class VM(object):
qemulog = open(self._qemu_log_path, 'wb')
try:
self._qmp = qmp.QEMUMonitorProtocol(self._monitor_path, server=True)
+ self._qtest = qtest.QEMUQtestProtocol(self._qtest_path, server=True)
self._popen = subprocess.Popen(self._args, stdin=devnull, stdout=qemulog,
stderr=subprocess.STDOUT)
self._qmp.accept()
+ self._qtest.accept()
except:
os.remove(self._monitor_path)
raise
@@ -173,18 +180,26 @@ class VM(object):
self._qmp.cmd('quit')
self._popen.wait()
os.remove(self._monitor_path)
+ os.remove(self._qtest_path)
os.remove(self._qemu_log_path)
self._popen = None
underscore_to_dash = string.maketrans('_', '-')
- def qmp(self, cmd, **args):
+ def qmp(self, cmd, conv_keys=True, **args):
'''Invoke a QMP command and return the result dict'''
qmp_args = dict()
for k in args.keys():
- qmp_args[k.translate(self.underscore_to_dash)] = args[k]
+ if conv_keys:
+ qmp_args[k.translate(self.underscore_to_dash)] = args[k]
+ else:
+ qmp_args[k] = args[k]
return self._qmp.cmd(cmd, args=qmp_args)
+ def qtest(self, cmd):
+ '''Send a qtest command to guest'''
+ return self._qtest.cmd(cmd)
+
def get_qmp_event(self, wait=False):
'''Poll for one queued QMP event and return it'''
return self._qmp.pull_event(wait=wait)
@@ -288,7 +303,7 @@ def main(supported_fmts=[], supported_oses=['linux']):
if supported_fmts and (imgfmt not in supported_fmts):
notrun('not suitable for this image format: %s' % imgfmt)
- if sys.platform not in supported_oses:
+ if True not in [sys.platform.startswith(x) for x in supported_oses]:
notrun('not suitable for this OS: %s' % sys.platform)
# We need to filter out the time taken from the output so that qemu-iotest
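Two details of the iotests.py changes above are easy to miss: qmp() normally translates underscores in keyword names to dashes, so throttle parameters such as bps_rd would be mangled without conv_keys=False, and the new qtest() method is what lets test 093 advance the virtual clock deterministically. A short usage sketch under those assumptions (the drive id 'drive0' and the 5-second step are illustrative):

import iotests

vm = iotests.VM().add_drive("null-aio://")
vm.launch()

# conv_keys=False keeps keys like 'bps_rd' verbatim; with the default
# key conversion they would be sent as 'bps-rd' and rejected.
vm.qmp("block_set_io_throttle", conv_keys=False,
       device='drive0', bps=4096, bps_rd=0, bps_wr=0,
       iops=0, iops_rd=0, iops_wr=0)

# Advance the qtest virtual clock by 5 seconds so queued, throttled
# requests become eligible to run.
vm.qtest("clock_step %d" % (5 * 10**9))

vm.shutdown()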
diff --git a/tests/test-write-threshold.c b/tests/test-write-threshold.c
new file mode 100644
index 0000000000..faffa7b855
--- /dev/null
+++ b/tests/test-write-threshold.c
@@ -0,0 +1,119 @@
+/*
+ * Test block device write threshold
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include <glib.h>
+#include <stdint.h>
+#include "block/block_int.h"
+#include "block/write-threshold.h"
+
+
+static void test_threshold_not_set_on_init(void)
+{
+ uint64_t res;
+ BlockDriverState bs;
+ memset(&bs, 0, sizeof(bs));
+
+ g_assert(!bdrv_write_threshold_is_set(&bs));
+
+ res = bdrv_write_threshold_get(&bs);
+ g_assert_cmpint(res, ==, 0);
+}
+
+static void test_threshold_set_get(void)
+{
+ uint64_t threshold = 4 * 1024 * 1024;
+ uint64_t res;
+ BlockDriverState bs;
+ memset(&bs, 0, sizeof(bs));
+
+ bdrv_write_threshold_set(&bs, threshold);
+
+ g_assert(bdrv_write_threshold_is_set(&bs));
+
+ res = bdrv_write_threshold_get(&bs);
+ g_assert_cmpint(res, ==, threshold);
+}
+
+static void test_threshold_multi_set_get(void)
+{
+ uint64_t threshold1 = 4 * 1024 * 1024;
+ uint64_t threshold2 = 15 * 1024 * 1024;
+ uint64_t res;
+ BlockDriverState bs;
+ memset(&bs, 0, sizeof(bs));
+
+ bdrv_write_threshold_set(&bs, threshold1);
+ bdrv_write_threshold_set(&bs, threshold2);
+ res = bdrv_write_threshold_get(&bs);
+ g_assert_cmpint(res, ==, threshold2);
+}
+
+static void test_threshold_not_trigger(void)
+{
+ uint64_t amount = 0;
+ uint64_t threshold = 4 * 1024 * 1024;
+ BlockDriverState bs;
+ BdrvTrackedRequest req;
+
+ memset(&bs, 0, sizeof(bs));
+ memset(&req, 0, sizeof(req));
+ req.offset = 1024;
+ req.bytes = 1024;
+
+ bdrv_write_threshold_set(&bs, threshold);
+ amount = bdrv_write_threshold_exceeded(&bs, &req);
+ g_assert_cmpuint(amount, ==, 0);
+}
+
+
+static void test_threshold_trigger(void)
+{
+ uint64_t amount = 0;
+ uint64_t threshold = 4 * 1024 * 1024;
+ BlockDriverState bs;
+ BdrvTrackedRequest req;
+
+ memset(&bs, 0, sizeof(bs));
+ memset(&req, 0, sizeof(req));
+ req.offset = (4 * 1024 * 1024) - 1024;
+ req.bytes = 2 * 1024;
+
+ bdrv_write_threshold_set(&bs, threshold);
+ amount = bdrv_write_threshold_exceeded(&bs, &req);
+ g_assert_cmpuint(amount, >=, 1024);
+}
+
+typedef struct TestStruct {
+ const char *name;
+ void (*func)(void);
+} TestStruct;
+
+
+int main(int argc, char **argv)
+{
+ size_t i;
+ TestStruct tests[] = {
+ { "/write-threshold/not-set-on-init",
+ test_threshold_not_set_on_init },
+ { "/write-threshold/set-get",
+ test_threshold_set_get },
+ { "/write-threshold/multi-set-get",
+ test_threshold_multi_set_get },
+ { "/write-threshold/not-trigger",
+ test_threshold_not_trigger },
+ { "/write-threshold/trigger",
+ test_threshold_trigger },
+ { NULL, NULL }
+ };
+
+ g_test_init(&argc, &argv, NULL);
+ for (i = 0; tests[i].name != NULL; i++) {
+ g_test_add_func(tests[i].name, tests[i].func);
+ }
+ return g_test_run();
+}
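The unit test above pins down the write-threshold semantics: a threshold of 0 means "not set", setting it twice keeps the latest value, and bdrv_write_threshold_exceeded() reports how far a tracked request's end (offset + bytes) reaches past the threshold, or 0 if it does not. A minimal Python model of that behaviour, matching the expectations in the test (the names are ad hoc, not the QEMU API):

def threshold_exceeded(threshold, offset, nbytes):
    # 0 means no threshold configured; a request that ends at or below
    # the threshold does not trigger it.
    if threshold == 0:
        return 0
    end = offset + nbytes
    return end - threshold if end > threshold else 0

# Mirrors test_threshold_not_trigger and test_threshold_trigger above.
assert threshold_exceeded(4 * 1024 * 1024, 1024, 1024) == 0
assert threshold_exceeded(4 * 1024 * 1024, 4 * 1024 * 1024 - 1024, 2 * 1024) >= 1024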
diff --git a/trace-events b/trace-events
index 907da7ed8a..f87b077805 100644
--- a/trace-events
+++ b/trace-events
@@ -116,6 +116,7 @@ virtio_blk_req_complete(void *req, int status) "req %p status %d"
virtio_blk_rw_complete(void *req, int ret) "req %p ret %d"
virtio_blk_handle_write(void *req, uint64_t sector, size_t nsectors) "req %p sector %"PRIu64" nsectors %zu"
virtio_blk_handle_read(void *req, uint64_t sector, size_t nsectors) "req %p sector %"PRIu64" nsectors %zu"
+virtio_blk_submit_multireq(void *mrb, int start, int num_reqs, uint64_t sector, size_t nsectors, bool is_write) "mrb %p start %d num_reqs %d sector %"PRIu64" nsectors %zu is_write %d"
# hw/block/dataplane/virtio-blk.c
virtio_blk_data_plane_start(void *s) "dataplane %p"
@@ -202,6 +203,10 @@ hd_geometry_guess(void *blk, uint32_t cyls, uint32_t heads, uint32_t secs, int t
jazz_led_read(uint64_t addr, uint8_t val) "read addr=0x%"PRIx64": 0x%x"
jazz_led_write(uint64_t addr, uint8_t new) "write addr=0x%"PRIx64": 0x%x"
+# hw/display/xenfb.c
+xenfb_mouse_event(void *opaque, int dx, int dy, int dz, int button_state, int abs_pointer_wanted) "%p x %d y %d z %d bs %#x abs %d"
+xenfb_input_connected(void *xendev, int abs_pointer_wanted) "%p abs %d"
+
# hw/net/lance.c
lance_mem_readw(uint64_t addr, uint32_t ret) "addr=%"PRIx64"val=0x%04x"
lance_mem_writew(uint64_t addr, uint32_t val) "addr=%"PRIx64"val=0x%04x"
@@ -220,6 +225,23 @@ slavio_check_interrupts(uint32_t pending, uint32_t intregm_disabled) "pending %x
slavio_set_irq(uint32_t target_cpu, int irq, uint32_t pil, int level) "Set cpu %d irq %d -> pil %d level %d"
slavio_set_timer_irq_cpu(int cpu, int level) "Set cpu %d local timer level %d"
+# hw/input/ps2.c
+ps2_put_keycode(void *opaque, int keycode) "%p keycode %d"
+ps2_read_data(void *opaque) "%p"
+ps2_set_ledstate(void *s, int ledstate) "%p ledstate %d"
+ps2_reset_keyboard(void *s) "%p"
+ps2_write_keyboard(void *opaque, int val) "%p val %d"
+ps2_keyboard_set_translation(void *opaque, int mode) "%p mode %d"
+ps2_mouse_send_packet(void *s, int dx1, int dy1, int dz1, int b) "%p x %d y %d z %d bs %#x"
+ps2_mouse_event_disabled(void *opaque, int dx, int dy, int dz, int buttons_state, int mouse_dx, int mouse_dy, int mouse_dz) "%p x %d y %d z %d bs %#x mx %d my %d mz %d "
+ps2_mouse_event(void *opaque, int dx, int dy, int dz, int buttons_state, int mouse_dx, int mouse_dy, int mouse_dz) "%p x %d y %d z %d bs %#x mx %d my %d mz %d "
+ps2_mouse_fake_event(void *opaque) "%p"
+ps2_write_mouse(void *opaque, int val) "%p val %d"
+ps2_kbd_reset(void *opaque) "%p"
+ps2_mouse_reset(void *opaque) "%p"
+ps2_kbd_init(void *s) "%p"
+ps2_mouse_init(void *s) "%p"
+
# hw/misc/slavio_misc.c
slavio_misc_update_irq_raise(void) "Raise IRQ"
slavio_misc_update_irq_lower(void) "Lower IRQ"
@@ -1260,6 +1282,26 @@ spapr_pci_msi_retry(unsigned config_addr, unsigned req_num, unsigned max_irqs) "
pci_update_mappings_del(void *d, uint32_t bus, uint32_t func, uint32_t slot, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,%#"PRIx64"+%#"PRIx64
pci_update_mappings_add(void *d, uint32_t bus, uint32_t func, uint32_t slot, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,%#"PRIx64"+%#"PRIx64
+# hw/net/pcnet.c
+pcnet_s_reset(void *s) "s=%p"
+pcnet_user_int(void *s) "s=%p"
+pcnet_isr_change(void *s, uint32_t isr, uint32_t isr_old) "s=%p INTA=%d<=%d"
+pcnet_init(void *s, uint64_t init_addr) "s=%p init_addr=%#"PRIx64
+pcnet_rlen_tlen(void *s, uint32_t rlen, uint32_t tlen) "s=%p rlen=%d tlen=%d"
+pcnet_ss32_rdra_tdra(void *s, uint32_t ss32, uint32_t rdra, uint32_t rcvrl, uint32_t tdra, uint32_t xmtrl) "s=%p ss32=%d rdra=0x%08x[%d] tdra=0x%08x[%d]"
+
+# hw/net/pcnet-pci.c
+pcnet_aprom_writeb(void *opaque, uint32_t addr, uint32_t val) "opaque=%p addr=0x%08x val=0x%02x"
+pcnet_aprom_readb(void *opaque, uint32_t addr, uint32_t val) "opaque=%p addr=0x%08x val=0x%02x"
+pcnet_ioport_read(void *opaque, uint64_t addr, unsigned size) "opaque=%p addr=%#"PRIx64" size=%d"
+pcnet_ioport_write(void *opaque, uint64_t addr, uint64_t data, unsigned size) "opaque=%p addr=%#"PRIx64" data=%#"PRIx64" size=%d"
+pcnet_mmio_writeb(void *opaque, uint64_t addr, uint32_t val) "opaque=%p addr=%#"PRIx64" val=0x%x"
+pcnet_mmio_writew(void *opaque, uint64_t addr, uint32_t val) "opaque=%p addr=%#"PRIx64" val=0x%x"
+pcnet_mmio_writel(void *opaque, uint64_t addr, uint32_t val) "opaque=%p addr=%#"PRIx64" val=0x%x"
+pcnet_mmio_readb(void *opaque, uint64_t addr, uint32_t val) "opaque=%p addr=%#"PRIx64" val=0x%x"
+pcnet_mmio_readw(void *opaque, uint64_t addr, uint32_t val) "opaque=%p addr=%#"PRIx64" val=0x%x"
+pcnet_mmio_readl(void *opaque, uint64_t addr, uint32_t val) "opaque=%p addr=%#"PRIx64" val=0x%x"
+
# hw/intc/xics.c
xics_icp_check_ipi(int server, uint8_t mfrr) "CPU %d can take IPI mfrr=%#x"
xics_icp_accept(uint32_t old_xirr, uint32_t new_xirr) "icp_accept: XIRR %#"PRIx32"->%#"PRIx32
diff --git a/translate-all.c b/translate-all.c
index 4a1b64fd83..9f47ce7f71 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -631,7 +631,7 @@ static inline void *alloc_code_gen_buffer(void)
#else
static inline void *alloc_code_gen_buffer(void)
{
- void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
+ void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);
if (buf == NULL) {
return NULL;
diff --git a/ui/vnc.c b/ui/vnc.c
index a742c9071c..10a272450b 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -659,10 +659,6 @@ void buffer_reserve(Buffer *buffer, size_t len)
if ((buffer->capacity - buffer->offset) < len) {
buffer->capacity += (len + 1024);
buffer->buffer = g_realloc(buffer->buffer, buffer->capacity);
- if (buffer->buffer == NULL) {
- fprintf(stderr, "vnc: out of memory\n");
- exit(1);
- }
}
}
@@ -1115,6 +1111,12 @@ static int vnc_update_client(VncState *vs, int has_dirty, bool sync)
n += vnc_job_add_rect(job, x * VNC_DIRTY_PIXELS_PER_BIT, y,
(x2 - x) * VNC_DIRTY_PIXELS_PER_BIT, h);
}
+ if (!x && x2 == width / VNC_DIRTY_PIXELS_PER_BIT) {
+ y += h;
+ if (y == height) {
+ break;
+ }
+ }
}
vnc_job_push(job);
@@ -3246,6 +3248,7 @@ char *vnc_display_local_addr(const char *id)
{
VncDisplay *vs = vnc_display_find(id);
+ assert(vs);
return vnc_socket_local_addr("%s:%s", vs->lsock);
}
@@ -3276,6 +3279,15 @@ static QemuOptsList qemu_vnc_opts = {
.name = "connections",
.type = QEMU_OPT_NUMBER,
},{
+ .name = "to",
+ .type = QEMU_OPT_NUMBER,
+ },{
+ .name = "ipv4",
+ .type = QEMU_OPT_BOOL,
+ },{
+ .name = "ipv6",
+ .type = QEMU_OPT_BOOL,
+ },{
.name = "password",
.type = QEMU_OPT_BOOL,
},{
@@ -3311,19 +3323,24 @@ void vnc_display_open(const char *id, Error **errp)
{
VncDisplay *vs = vnc_display_find(id);
QemuOpts *opts = qemu_opts_find(&qemu_vnc_opts, id);
- const char *display, *share, *device_id;
+ const char *share, *device_id;
QemuConsole *con;
- int password = 0;
- int reverse = 0;
+ bool password = false;
+ bool reverse = false;
+ const char *vnc;
+ const char *has_to;
+ char *display, *to = NULL;
+ bool has_ipv4 = false;
+ bool has_ipv6 = false;
#ifdef CONFIG_VNC_WS
const char *websocket;
#endif
#ifdef CONFIG_VNC_TLS
- int tls = 0, x509 = 0;
+ bool tls = false, x509 = false;
const char *path;
#endif
#ifdef CONFIG_VNC_SASL
- int sasl = 0;
+ bool sasl = false;
int saslErr;
#endif
#if defined(CONFIG_VNC_TLS) || defined(CONFIG_VNC_SASL)
@@ -3340,10 +3357,21 @@ void vnc_display_open(const char *id, Error **errp)
if (!opts) {
return;
}
- display = qemu_opt_get(opts, "vnc");
- if (!display || strcmp(display, "none") == 0) {
+ vnc = qemu_opt_get(opts, "vnc");
+ if (!vnc || strcmp(vnc, "none") == 0) {
return;
}
+
+ has_to = qemu_opt_get(opts, "to");
+ if (has_to) {
+ to = g_strdup_printf(",to=%s", has_to);
+ }
+ has_ipv4 = qemu_opt_get_bool(opts, "ipv4", false);
+ has_ipv6 = qemu_opt_get_bool(opts, "ipv6", false);
+ display = g_strdup_printf("%s%s%s%s", vnc,
+ has_to ? to : "",
+ has_ipv4 ? ",ipv4" : "",
+ has_ipv6 ? ",ipv6" : "");
vs->display = g_strdup(display);
password = qemu_opt_get_bool(opts, "password", false);
@@ -3364,7 +3392,7 @@ void vnc_display_open(const char *id, Error **errp)
tls = qemu_opt_get_bool(opts, "tls", false);
path = qemu_opt_get(opts, "x509");
if (path) {
- x509 = 1;
+ x509 = true;
vs->tls.x509verify = qemu_opt_get_bool(opts, "x509verify", false);
if (vnc_tls_set_x509_creds_dir(vs, path) < 0) {
error_setg(errp, "Failed to find x509 certificates/keys in %s",
@@ -3623,6 +3651,8 @@ void vnc_display_open(const char *id, Error **errp)
}
#endif /* CONFIG_VNC_WS */
}
+ g_free(to);
+ g_free(display);
g_free(vs->display);
vs->display = dpy;
qemu_set_fd_handler2(vs->lsock, NULL,
@@ -3637,6 +3667,8 @@ void vnc_display_open(const char *id, Error **errp)
return;
fail:
+ g_free(to);
+ g_free(display);
g_free(vs->display);
vs->display = NULL;
#ifdef CONFIG_VNC_WS
@@ -3660,6 +3692,19 @@ QemuOpts *vnc_parse_func(const char *str)
return qemu_opts_parse(qemu_find_opts("vnc"), str, 1);
}
+void vnc_auto_assign_id(QemuOptsList *olist, QemuOpts *opts)
+{
+ int i = 2;
+ char *id;
+
+ id = g_strdup("default");
+ while (qemu_opts_find(olist, id)) {
+ g_free(id);
+ id = g_strdup_printf("vnc%d", i++);
+ }
+ qemu_opts_set_id(opts, id);
+}
+
int vnc_init_func(QemuOpts *opts, void *opaque)
{
Error *local_err = NULL;
@@ -3668,13 +3713,8 @@ int vnc_init_func(QemuOpts *opts, void *opaque)
if (!id) {
/* auto-assign id if not present */
- int i = 2;
- id = g_strdup("default");
- while (qemu_opts_find(olist, id)) {
- g_free(id);
- id = g_strdup_printf("vnc%d", i++);
- }
- qemu_opts_set_id(opts, id);
+ vnc_auto_assign_id(olist, opts);
+ id = (char *)qemu_opts_id(opts);
}
vnc_display_init(id);
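The vnc_display_open() change rebuilds the display string from the separate "to", "ipv4" and "ipv6" options instead of expecting them embedded in the -vnc argument. A rough Python illustration of the string that ends up in vs->display (purely illustrative, not QEMU code):

def compose_vnc_display(vnc, to=None, ipv4=False, ipv6=False):
    # Mirrors the g_strdup_printf("%s%s%s%s", ...) in vnc_display_open().
    return "%s%s%s%s" % (vnc,
                         ",to=%s" % to if to is not None else "",
                         ",ipv4" if ipv4 else "",
                         ",ipv6" if ipv6 else "")

assert compose_vnc_display("127.0.0.1:0", to=5, ipv4=True) == "127.0.0.1:0,to=5,ipv4"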
diff --git a/util/aes.c b/util/aes.c
index 6058f1950b..3d7c4be9b6 100644
--- a/util/aes.c
+++ b/util/aes.c
@@ -1161,7 +1161,7 @@ int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
rk += 8;
}
}
- return 0;
+ abort();
}
/**
diff --git a/util/qemu-option.c b/util/qemu-option.c
index a708241643..d3ab65d24f 100644
--- a/util/qemu-option.c
+++ b/util/qemu-option.c
@@ -213,7 +213,7 @@ void parse_option_size(const char *name, const char *value,
bool has_help_option(const char *param)
{
size_t buflen = strlen(param) + 1;
- char *buf = g_malloc0(buflen);
+ char *buf = g_malloc(buflen);
const char *p = param;
bool result = false;
@@ -230,14 +230,14 @@ bool has_help_option(const char *param)
}
out:
- free(buf);
+ g_free(buf);
return result;
}
bool is_valid_option_list(const char *param)
{
size_t buflen = strlen(param) + 1;
- char *buf = g_malloc0(buflen);
+ char *buf = g_malloc(buflen);
const char *p = param;
bool result = true;
@@ -255,7 +255,7 @@ bool is_valid_option_list(const char *param)
}
out:
- free(buf);
+ g_free(buf);
return result;
}
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index a76bb3c913..61fc3c1364 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -512,7 +512,7 @@ InetSocketAddress *inet_parse(const char *str, Error **errp)
{
InetSocketAddress *addr;
const char *optstr, *h;
- char host[64];
+ char host[65];
char port[33];
int to;
int pos;
@@ -694,7 +694,7 @@ int unix_listen_opts(QemuOpts *opts, Error **errp)
sock = qemu_socket(PF_UNIX, SOCK_STREAM, 0);
if (sock < 0) {
- error_setg_errno(errp, errno, "Failed to create socket");
+ error_setg_errno(errp, errno, "Failed to create Unix socket");
return -1;
}
@@ -703,9 +703,15 @@ int unix_listen_opts(QemuOpts *opts, Error **errp)
if (path && strlen(path)) {
snprintf(un.sun_path, sizeof(un.sun_path), "%s", path);
} else {
- char *tmpdir = getenv("TMPDIR");
- snprintf(un.sun_path, sizeof(un.sun_path), "%s/qemu-socket-XXXXXX",
- tmpdir ? tmpdir : "/tmp");
+ const char *tmpdir = getenv("TMPDIR");
+ tmpdir = tmpdir ? tmpdir : "/tmp";
+ if (snprintf(un.sun_path, sizeof(un.sun_path), "%s/qemu-socket-XXXXXX",
+ tmpdir) >= sizeof(un.sun_path)) {
+ error_setg_errno(errp, errno,
+ "TMPDIR environment variable (%s) too large", tmpdir);
+ goto err;
+ }
+
/*
* This dummy fd usage silences the mktemp() insecure warning.
* Using mkstemp() doesn't make things more secure here
@@ -713,13 +719,19 @@ int unix_listen_opts(QemuOpts *opts, Error **errp)
* to unlink first and thus re-open the race window. The
* worst case possible is bind() failing, i.e. a DoS attack.
*/
- fd = mkstemp(un.sun_path); close(fd);
+ fd = mkstemp(un.sun_path);
+ if (fd < 0) {
+ error_setg_errno(errp, errno,
+ "Failed to make a temporary socket name in %s", tmpdir);
+ goto err;
+ }
+ close(fd);
qemu_opt_set(opts, "path", un.sun_path);
}
unlink(un.sun_path);
if (bind(sock, (struct sockaddr*) &un, sizeof(un)) < 0) {
- error_setg_errno(errp, errno, "Failed to bind socket");
+ error_setg_errno(errp, errno, "Failed to bind socket to %s", un.sun_path);
goto err;
}
if (listen(sock, 1) < 0) {
diff --git a/util/uri.c b/util/uri.c
index 918d23516d..1cfd78bdb5 100644
--- a/util/uri.c
+++ b/util/uri.c
@@ -928,12 +928,10 @@ uri_parse(const char *str) {
if (str == NULL)
return(NULL);
uri = uri_new();
- if (uri != NULL) {
- ret = rfc3986_parse_uri_reference(uri, str);
- if (ret) {
- uri_free(uri);
- return(NULL);
- }
+ ret = rfc3986_parse_uri_reference(uri, str);
+ if (ret) {
+ uri_free(uri);
+ return(NULL);
}
return(uri);
}
@@ -974,15 +972,13 @@ uri_parse_raw(const char *str, int raw) {
if (str == NULL)
return(NULL);
uri = uri_new();
- if (uri != NULL) {
- if (raw) {
- uri->cleanup |= 2;
- }
- ret = uri_parse_into(uri, str);
- if (ret) {
- uri_free(uri);
- return(NULL);
- }
+ if (raw) {
+ uri->cleanup |= 2;
+ }
+ ret = uri_parse_into(uri, str);
+ if (ret) {
+ uri_free(uri);
+ return(NULL);
}
return(uri);
}
@@ -1053,14 +1049,12 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = *p++;
}
if (len >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = ':';
@@ -1070,7 +1064,6 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
if (IS_RESERVED(*(p)) || IS_UNRESERVED(*(p)))
@@ -1087,7 +1080,6 @@ uri_to_string(URI *uri) {
if (uri->server != NULL) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = '/';
@@ -1097,7 +1089,6 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
if ((IS_UNRESERVED(*(p))) ||
@@ -1116,7 +1107,6 @@ uri_to_string(URI *uri) {
}
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = '@';
@@ -1125,7 +1115,6 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = *p++;
@@ -1133,7 +1122,6 @@ uri_to_string(URI *uri) {
if (uri->port > 0) {
if (len + 10 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
len += snprintf(&ret[len], max - len, ":%d", uri->port);
@@ -1141,7 +1129,6 @@ uri_to_string(URI *uri) {
} else if (uri->authority != NULL) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = '/';
@@ -1150,7 +1137,6 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
if ((IS_UNRESERVED(*(p))) ||
@@ -1169,7 +1155,6 @@ uri_to_string(URI *uri) {
} else if (uri->scheme != NULL) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = '/';
@@ -1189,7 +1174,6 @@ uri_to_string(URI *uri) {
(!strcmp(uri->scheme, "file"))) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = *p++;
@@ -1199,7 +1183,6 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
if ((IS_UNRESERVED(*(p))) || ((*(p) == '/')) ||
@@ -1219,7 +1202,6 @@ uri_to_string(URI *uri) {
if (uri->query != NULL) {
if (len + 1 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = '?';
@@ -1227,7 +1209,6 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len + 1 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = *p++;
@@ -1237,7 +1218,6 @@ uri_to_string(URI *uri) {
if (uri->fragment != NULL) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len++] = '#';
@@ -1245,7 +1225,6 @@ uri_to_string(URI *uri) {
while (*p != 0) {
if (len + 3 >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
if ((IS_UNRESERVED(*(p))) || (IS_RESERVED(*(p))))
@@ -1261,15 +1240,10 @@ uri_to_string(URI *uri) {
}
if (len >= max) {
temp = realloc2n(ret, &max);
- if (temp == NULL) goto mem_error;
ret = temp;
}
ret[len] = 0;
return(ret);
-
-mem_error:
- g_free(ret);
- return(NULL);
}
/**
@@ -1675,8 +1649,6 @@ uri_resolve(const char *uri, const char *base) {
else {
if (*uri) {
ref = uri_new();
- if (ref == NULL)
- goto done;
ret = uri_parse_into(ref, uri);
}
else
@@ -1695,8 +1667,6 @@ uri_resolve(const char *uri, const char *base) {
ret = -1;
else {
bas = uri_new();
- if (bas == NULL)
- goto done;
ret = uri_parse_into(bas, base);
}
if (ret != 0) {
@@ -1727,8 +1697,6 @@ uri_resolve(const char *uri, const char *base) {
* document.
*/
res = uri_new();
- if (res == NULL)
- goto done;
if ((ref->scheme == NULL) && (ref->path == NULL) &&
((ref->authority == NULL) && (ref->server == NULL))) {
res->scheme = g_strdup(bas->scheme);
@@ -1933,8 +1901,6 @@ uri_resolve_relative (const char *uri, const char * base)
* First parse URI into a standard form
*/
ref = uri_new ();
- if (ref == NULL)
- return NULL;
/* If URI not already in "relative" form */
if (uri[0] != '.') {
ret = uri_parse_into (ref, uri);
@@ -1951,8 +1917,6 @@ uri_resolve_relative (const char *uri, const char * base)
goto done;
}
bas = uri_new ();
- if (bas == NULL)
- goto done;
if (base[0] != '.') {
ret = uri_parse_into (bas, base);
if (ret != 0)
@@ -1971,7 +1935,8 @@ uri_resolve_relative (const char *uri, const char * base)
val = g_strdup (uri);
goto done;
}
- if (!strcmp(bas->path, ref->path)) {
+ if (bas->path == ref->path ||
+ (bas->path && ref->path && !strcmp(bas->path, ref->path))) {
val = g_strdup("");
goto done;
}
diff --git a/vl.c b/vl.c
index 983259bc9f..8c8f14287e 100644
--- a/vl.c
+++ b/vl.c
@@ -2195,6 +2195,7 @@ static int mon_init_func(QemuOpts *opts, void *opaque)
static void monitor_parse(const char *optarg, const char *mode, bool pretty)
{
static int monitor_device_index = 0;
+ Error *local_err = NULL;
QemuOpts *opts;
const char *p;
char label[32];
@@ -2215,9 +2216,10 @@ static void monitor_parse(const char *optarg, const char *mode, bool pretty)
}
}
- opts = qemu_opts_create(qemu_find_opts("mon"), label, 1, NULL);
+ opts = qemu_opts_create(qemu_find_opts("mon"), label, 1, &local_err);
if (!opts) {
- fprintf(stderr, "duplicate chardev: %s\n", label);
+ error_report("%s", error_get_pretty(local_err));
+ error_free(local_err);
exit(1);
}
qemu_opt_set(opts, "mode", mode);
@@ -2450,6 +2452,7 @@ static gint machine_class_cmp(gconstpointer a, gconstpointer b)
mc = find_machine(name);
}
if (mc) {
+ g_slist_free(machines);
return mc;
}
if (name && !is_help_option(name)) {