author    Peter Maydell <peter.maydell@linaro.org>  2015-04-28 16:55:03 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2015-04-28 16:55:03 +0100
commit    a9392bc93c8615ad1983047e9f91ee3fa8aae75f (patch)
tree      0ff3fbb6401aa0addbbda6900ce4b984b0c1d2a3 /tests
parent    84cbd63f87c1d246f51ec8eee5367a5588f367fd (diff)
parent    61007b316cd71ee7333ff7a0a749a8949527575f (diff)
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block patches

# gpg: Signature made Tue Apr 28 15:35:05 2015 BST using RSA key ID C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"

* remotes/kevin/tags/for-upstream: (76 commits)
  block: move I/O request processing to block/io.c
  block: extract bdrv_setup_io_funcs()
  block: add bdrv_set_dirty()/bdrv_reset_dirty() to block_int.h
  block: replace bdrv_states iteration with bdrv_next()
  vmdk: Widen before shifting 32 bit header field
  block/dmg: make it modular
  block/mirror: Always call block_job_sleep_ns()
  iotests: add incremental backup granularity tests
  iotests: add incremental backup failure recovery test
  iotests: add simple incremental backup case
  iotests: add QMP event waiting queue
  iotests: add invalid input incremental backup tests
  hbitmap: truncate tests
  block: Resize bitmaps on bdrv_truncate
  block: Ensure consistent bitmap function prototypes
  block: add BdrvDirtyBitmap documentation
  qmp: Add dirty bitmap status field in query-block
  qmp: add block-dirty-bitmap-clear
  qmp: Add support of "dirty-bitmap" sync mode for drive-backup
  block: Add bitmap successors
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/qemu-iotests/122        | 223
-rw-r--r--  tests/qemu-iotests/122.out    | 209
-rw-r--r--  tests/qemu-iotests/124        | 363
-rw-r--r--  tests/qemu-iotests/124.out    |   5
-rw-r--r--  tests/qemu-iotests/129        |  86
-rw-r--r--  tests/qemu-iotests/129.out    |   5
-rw-r--r--  tests/qemu-iotests/group      |   3
-rw-r--r--  tests/qemu-iotests/iotests.py |  38
-rw-r--r--  tests/test-aio.c              |  19
-rw-r--r--  tests/test-hbitmap.c          | 255
10 files changed, 1198 insertions(+), 8 deletions(-)
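
The headline feature of this series for the tests below is QMP-driven incremental backup on top of dirty bitmaps, exercised by tests/qemu-iotests/124. For orientation, here is a minimal sketch of the workflow that test drives through the iotests.VM() helper; the 'drive0' id, the image paths, and the omitted error checking are illustrative assumptions rather than part of the patches. At this point in the series the sync mode is named "dirty-bitmap", and mode='existing' means the incremental target was pre-created with the previous backup as its backing file.

    import iotests

    vm = iotests.VM().add_drive('/path/to/disk.qcow2')   # placeholder image path
    vm.launch()

    # Record which clusters change from now on
    vm.qmp('block-dirty-bitmap-add', node='drive0', name='bitmap0')

    # Full "anchor" backup paired with the bitmap
    vm.qmp('drive-backup', device='drive0', sync='full',
           format='qcow2', target='/path/to/anchor.qcow2')

    # ... guest writes dirty some clusters ...

    # Copy only the dirty clusters into a pre-created image whose backing
    # file is the previous backup; completion is reported asynchronously
    # via a BLOCK_JOB_COMPLETED event
    vm.qmp('drive-backup', device='drive0', sync='dirty-bitmap', bitmap='bitmap0',
           format='qcow2', target='/path/to/inc.0.qcow2', mode='existing')

    vm.shutdown()

Test 124's create_anchor_backup() and create_incremental() wrap exactly these calls, wait for BLOCK_JOB_COMPLETED via the new VM.event_wait() helper, and compare each incremental target against a full reference backup.
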
diff --git a/tests/qemu-iotests/122 b/tests/qemu-iotests/122
new file mode 100755
index 0000000000..350ca9c466
--- /dev/null
+++ b/tests/qemu-iotests/122
@@ -0,0 +1,223 @@
+#!/bin/bash
+#
+# Test some qemu-img convert cases
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+here="$PWD"
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ rm -f "$TEST_IMG".[123]
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt qcow2
+_supported_proto file
+_supported_os Linux
+
+
+TEST_IMG="$TEST_IMG".base _make_test_img 64M
+$QEMU_IO -c "write -P 0x11 0 64M" "$TEST_IMG".base 2>&1 | _filter_qemu_io | _filter_testdir
+
+
+echo
+echo "=== Check allocation status regression with -B ==="
+echo
+
+_make_test_img -b "$TEST_IMG".base
+$QEMU_IO -c "write -P 0x22 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IMG map "$TEST_IMG".orig | _filter_qemu_img_map
+
+
+echo
+echo "=== Check that zero clusters are kept in overlay ==="
+echo
+
+_make_test_img -b "$TEST_IMG".base
+
+$QEMU_IO -c "write -P 0 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+
+$QEMU_IO -c "write -z 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+
+
+echo
+echo "=== Concatenate multiple source images ==="
+echo
+
+TEST_IMG="$TEST_IMG".1 _make_test_img 4M
+TEST_IMG="$TEST_IMG".2 _make_test_img 4M
+TEST_IMG="$TEST_IMG".3 _make_test_img 4M
+
+$QEMU_IO -c "write -P 0x11 0 64k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write -P 0x22 0 64k" "$TEST_IMG".2 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write -P 0x33 0 64k" "$TEST_IMG".3 2>&1 | _filter_qemu_io | _filter_testdir
+
+$QEMU_IMG convert -O $IMGFMT "$TEST_IMG".[123] "$TEST_IMG"
+$QEMU_IMG map "$TEST_IMG" | _filter_qemu_img_map
+$QEMU_IO -c "read -P 0x11 0 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x22 4M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x33 8M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+$QEMU_IMG convert -c -O $IMGFMT "$TEST_IMG".[123] "$TEST_IMG"
+$QEMU_IMG map --output=json "$TEST_IMG" | _filter_qemu_img_map
+$QEMU_IO -c "read -P 0x11 0 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x22 4M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x33 8M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+# -B can't be combined with concatenation
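+# (both invocations below are expected to fail with an error message)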
+$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG".[123] "$TEST_IMG"
+$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base "$TEST_IMG".[123] "$TEST_IMG"
+
+
+echo
+echo "=== Compression with misaligned allocations and image sizes ==="
+echo
+
+TEST_IMG="$TEST_IMG".1 _make_test_img 1023k -o cluster_size=1024
+TEST_IMG="$TEST_IMG".2 _make_test_img 1023k -o cluster_size=1024
+
+$QEMU_IO -c "write -P 0x11 16k 16k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write -P 0x22 130k 130k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write -P 0x33 1022k 1k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write -P 0x44 0k 1k" "$TEST_IMG".2 2>&1 | _filter_qemu_io | _filter_testdir
+
+$QEMU_IMG convert -c -O $IMGFMT "$TEST_IMG".[12] "$TEST_IMG"
+$QEMU_IMG map --output=json "$TEST_IMG" | _filter_qemu_img_map
+$QEMU_IO -c "read -P 0 0k 16k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x11 16k 16k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 32k 98k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x22 130k 130k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 260k 762k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x33 1022k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x44 1023k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 1024k 1022k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+
+echo
+echo "=== Full allocation with -S 0 ==="
+echo
+
+# Standalone image
+_make_test_img 64M
+$QEMU_IO -c "write -P 0x22 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write -P 0 3M 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo convert -S 0:
+$QEMU_IMG convert -O $IMGFMT -S 0 "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 3M 61M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+
+echo
+echo convert -c -S 0:
+$QEMU_IMG convert -O $IMGFMT -c -S 0 "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 3M 61M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+
+# With backing file
+TEST_IMG="$TEST_IMG".base _make_test_img 64M
+$QEMU_IO -c "write -P 0x11 0 32M" "$TEST_IMG".base 2>&1 | _filter_qemu_io | _filter_testdir
+
+_make_test_img -b "$TEST_IMG".base 64M
+$QEMU_IO -c "write -P 0x22 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo convert -S 0 with source backing file:
+$QEMU_IMG convert -O $IMGFMT -S 0 "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+
+echo
+echo convert -c -S 0 with source backing file:
+$QEMU_IMG convert -O $IMGFMT -c -S 0 "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+
+# With keeping the backing file
+echo
+echo convert -S 0 -B ...
+$QEMU_IMG convert -O $IMGFMT -S 0 "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+
+echo
+echo convert -c -S 0 -B ...
+$QEMU_IMG convert -O $IMGFMT -c -S 0 "$TEST_IMG" "$TEST_IMG".orig
+$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+
+
+echo
+echo "=== Non-zero -S ==="
+echo
+
+_make_test_img 64M -o cluster_size=1k
+$QEMU_IO -c "write -P 0 0 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write 0 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write 8k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "write 17k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+for min_sparse in 4k 8k; do
+ echo
+ echo convert -S $min_sparse
+ $QEMU_IMG convert -O $IMGFMT -o cluster_size=1k -S $min_sparse "$TEST_IMG" "$TEST_IMG".orig
+ $QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+
+ echo
+ echo convert -c -S $min_sparse
+ # For compressed images, -S values other than 0 are ignored
+ $QEMU_IMG convert -O $IMGFMT -o cluster_size=1k -c -S $min_sparse "$TEST_IMG" "$TEST_IMG".orig
+ $QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map
+done
+
+# success, all done
+echo '*** done'
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/122.out b/tests/qemu-iotests/122.out
new file mode 100644
index 0000000000..1f853b9e93
--- /dev/null
+++ b/tests/qemu-iotests/122.out
@@ -0,0 +1,209 @@
+QA output created by 122
+Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864
+wrote 67108864/67108864 bytes at offset 0
+64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+=== Check allocation status regression with -B ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file='TEST_DIR/t.IMGFMT.base'
+wrote 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+Offset Length File
+0 0x300000 TEST_DIR/t.IMGFMT.orig
+0x300000 0x3d00000 TEST_DIR/t.IMGFMT.base
+
+=== Check that zero clusters are kept in overlay ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file='TEST_DIR/t.IMGFMT.base'
+wrote 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+=== Concatenate multiple source images ===
+
+Formatting 'TEST_DIR/t.IMGFMT.1', fmt=IMGFMT size=4194304
+Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=4194304
+Formatting 'TEST_DIR/t.IMGFMT.3', fmt=IMGFMT size=4194304
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+Offset Length File
+0 0x10000 TEST_DIR/t.IMGFMT
+0x400000 0x10000 TEST_DIR/t.IMGFMT
+0x800000 0x10000 TEST_DIR/t.IMGFMT
+read 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 65536/65536 bytes at offset 4194304
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 65536/65536 bytes at offset 8388608
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": true},
+{ "start": 65536, "length": 4128768, "depth": 0, "zero": true, "data": false},
+{ "start": 4194304, "length": 65536, "depth": 0, "zero": false, "data": true},
+{ "start": 4259840, "length": 4128768, "depth": 0, "zero": true, "data": false},
+{ "start": 8388608, "length": 65536, "depth": 0, "zero": false, "data": true},
+{ "start": 8454144, "length": 4128768, "depth": 0, "zero": true, "data": false}]
+read 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 65536/65536 bytes at offset 4194304
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 65536/65536 bytes at offset 8388608
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-img: -B makes no sense when concatenating multiple input images
+qemu-img: -B makes no sense when concatenating multiple input images
+
+=== Compression with misaligned allocations and image sizes ===
+
+Formatting 'TEST_DIR/t.IMGFMT.1', fmt=IMGFMT size=1047552
+Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=1047552
+wrote 16384/16384 bytes at offset 16384
+16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 133120/133120 bytes at offset 133120
+130 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 1024/1024 bytes at offset 1046528
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 1024/1024 bytes at offset 0
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": true},
+{ "start": 65536, "length": 65536, "depth": 0, "zero": true, "data": false},
+{ "start": 131072, "length": 196608, "depth": 0, "zero": false, "data": true},
+{ "start": 327680, "length": 655360, "depth": 0, "zero": true, "data": false},
+{ "start": 983040, "length": 65536, "depth": 0, "zero": false, "data": true},
+{ "start": 1048576, "length": 1046528, "depth": 0, "zero": true, "data": false}]
+read 16384/16384 bytes at offset 0
+16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 16384/16384 bytes at offset 16384
+16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 100352/100352 bytes at offset 32768
+98 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 133120/133120 bytes at offset 133120
+130 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 780288/780288 bytes at offset 266240
+762 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 1024/1024 bytes at offset 1046528
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 1024/1024 bytes at offset 1047552
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 1046528/1046528 bytes at offset 1048576
+1022 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+=== Full allocation with -S 0 ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+wrote 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 3145728/3145728 bytes at offset 3145728
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+convert -S 0:
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 63963136/63963136 bytes at offset 3145728
+61 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 6291456, "depth": 0, "zero": false, "data": true, "offset": 327680},
+{ "start": 6291456, "length": 60817408, "depth": 0, "zero": true, "data": false}]
+
+convert -c -S 0:
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 63963136/63963136 bytes at offset 3145728
+61 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 6291456, "depth": 0, "zero": false, "data": true},
+{ "start": 6291456, "length": 60817408, "depth": 0, "zero": true, "data": false}]
+Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864
+wrote 33554432/33554432 bytes at offset 0
+32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file='TEST_DIR/t.IMGFMT.base'
+wrote 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+convert -S 0 with source backing file:
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 30408704/30408704 bytes at offset 3145728
+29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 33554432/33554432 bytes at offset 33554432
+32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": 327680}]
+
+convert -c -S 0 with source backing file:
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 30408704/30408704 bytes at offset 3145728
+29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 33554432/33554432 bytes at offset 33554432
+32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true}]
+
+convert -S 0 -B ...
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 30408704/30408704 bytes at offset 3145728
+29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 33554432/33554432 bytes at offset 33554432
+32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": 327680}]
+
+convert -c -S 0 -B ...
+read 3145728/3145728 bytes at offset 0
+3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 30408704/30408704 bytes at offset 3145728
+29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 33554432/33554432 bytes at offset 33554432
+32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true}]
+
+=== Non-zero -S ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 1024/1024 bytes at offset 0
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 1024/1024 bytes at offset 8192
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 1024/1024 bytes at offset 17408
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+convert -S 4k
+[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 8192},
+{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false},
+{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 9216},
+{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false},
+{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 10240},
+{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}]
+
+convert -c -S 4k
+[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true},
+{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false},
+{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true},
+{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false},
+{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true},
+{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}]
+
+convert -S 8k
+[{ "start": 0, "length": 9216, "depth": 0, "zero": false, "data": true, "offset": 8192},
+{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false},
+{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 17408},
+{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}]
+
+convert -c -S 8k
+[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true},
+{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false},
+{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true},
+{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false},
+{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true},
+{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}]
+*** done
diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124
new file mode 100644
index 0000000000..3ee78cd1f1
--- /dev/null
+++ b/tests/qemu-iotests/124
@@ -0,0 +1,363 @@
+#!/usr/bin/env python
+#
+# Tests for incremental drive-backup
+#
+# Copyright (C) 2015 John Snow for Red Hat, Inc.
+#
+# Based on 056.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import iotests
+
+
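+# Each entry of 'patterns' is a (pattern byte, offset, length) tuple, e.g.
+# ('0x41', 0, 512), passed straight through to qemu-io 'write -P'.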
+def io_write_patterns(img, patterns):
+ for pattern in patterns:
+ iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
+
+
+def try_remove(img):
+ try:
+ os.remove(img)
+ except OSError:
+ pass
+
+
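+# One dirty bitmap under test: 'backups' pairs each incremental backup target
+# with the full reference backup image it is later compared against.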
+class Bitmap:
+ def __init__(self, name, drive):
+ self.name = name
+ self.drive = drive
+ self.num = 0
+ self.backups = list()
+
+ def base_target(self):
+ return (self.drive['backup'], None)
+
+ def new_target(self, num=None):
+ if num is None:
+ num = self.num
+ self.num = num + 1
+ base = os.path.join(iotests.test_dir,
+ "%s.%s." % (self.drive['id'], self.name))
+ suff = "%i.%s" % (num, self.drive['fmt'])
+ target = base + "inc" + suff
+ reference = base + "ref" + suff
+ self.backups.append((target, reference))
+ return (target, reference)
+
+ def last_target(self):
+ if self.backups:
+ return self.backups[-1]
+ return self.base_target()
+
+ def del_target(self):
+ for image in self.backups.pop():
+ try_remove(image)
+ self.num -= 1
+
+ def cleanup(self):
+ for backup in self.backups:
+ for image in backup:
+ try_remove(image)
+
+
+class TestIncrementalBackup(iotests.QMPTestCase):
+ def setUp(self):
+ self.bitmaps = list()
+ self.files = list()
+ self.drives = list()
+ self.vm = iotests.VM()
+ self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)
+
+ # Create a base image with a distinctive patterning
+ drive0 = self.add_node('drive0')
+ self.img_create(drive0['file'], drive0['fmt'])
+ self.vm.add_drive(drive0['file'])
+ io_write_patterns(drive0['file'], (('0x41', 0, 512),
+ ('0xd5', '1M', '32k'),
+ ('0xdc', '32M', '124k')))
+ self.vm.launch()
+
+
+ def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
+ if path is None:
+ path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
+ if backup is None:
+ backup = os.path.join(iotests.test_dir,
+ '%s.full.backup.%s' % (node_id, fmt))
+
+ self.drives.append({
+ 'id': node_id,
+ 'file': path,
+ 'backup': backup,
+ 'fmt': fmt })
+ return self.drives[-1]
+
+
+ def img_create(self, img, fmt=iotests.imgfmt, size='64M',
+ parent=None, parentFormat=None):
+ if parent:
+ if parentFormat is None:
+ parentFormat = fmt
+ iotests.qemu_img('create', '-f', fmt, img, size,
+ '-b', parent, '-F', parentFormat)
+ else:
+ iotests.qemu_img('create', '-f', fmt, img, size)
+ self.files.append(img)
+
+
+ def do_qmp_backup(self, error='Input/output error', **kwargs):
+ res = self.vm.qmp('drive-backup', **kwargs)
+ self.assert_qmp(res, 'return', {})
+
+ event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
+ match={'data': {'device': kwargs['device']}})
+ self.assertIsNotNone(event)
+
+ try:
+ failure = self.dictpath(event, 'data/error')
+ except AssertionError:
+ # Backup succeeded.
+ self.assert_qmp(event, 'data/offset', event['data']['len'])
+ return True
+ else:
+ # Backup failed.
+ self.assert_qmp(event, 'data/error', error)
+ return False
+
+
+ def create_anchor_backup(self, drive=None):
+ if drive is None:
+ drive = self.drives[-1]
+ res = self.do_qmp_backup(device=drive['id'], sync='full',
+ format=drive['fmt'], target=drive['backup'])
+ self.assertTrue(res)
+ self.files.append(drive['backup'])
+ return drive['backup']
+
+
+ def make_reference_backup(self, bitmap=None):
+ if bitmap is None:
+ bitmap = self.bitmaps[-1]
+ _, reference = bitmap.last_target()
+ res = self.do_qmp_backup(device=bitmap.drive['id'], sync='full',
+ format=bitmap.drive['fmt'], target=reference)
+ self.assertTrue(res)
+
+
+ def add_bitmap(self, name, drive, **kwargs):
+ bitmap = Bitmap(name, drive)
+ self.bitmaps.append(bitmap)
+ result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
+ name=bitmap.name, **kwargs)
+ self.assert_qmp(result, 'return', {})
+ return bitmap
+
+
+ def prepare_backup(self, bitmap=None, parent=None):
+ if bitmap is None:
+ bitmap = self.bitmaps[-1]
+ if parent is None:
+ parent, _ = bitmap.last_target()
+
+ target, _ = bitmap.new_target()
+ self.img_create(target, bitmap.drive['fmt'], parent=parent)
+ return target
+
+
+ def create_incremental(self, bitmap=None, parent=None,
+ parentFormat=None, validate=True):
+ if bitmap is None:
+ bitmap = self.bitmaps[-1]
+ if parent is None:
+ parent, _ = bitmap.last_target()
+
+ target = self.prepare_backup(bitmap, parent)
+ res = self.do_qmp_backup(device=bitmap.drive['id'],
+ sync='dirty-bitmap', bitmap=bitmap.name,
+ format=bitmap.drive['fmt'], target=target,
+ mode='existing')
+ if not res:
+ bitmap.del_target()
+ self.assertFalse(validate)
+ else:
+ self.make_reference_backup(bitmap)
+ return res
+
+
+ def check_backups(self):
+ for bitmap in self.bitmaps:
+ for incremental, reference in bitmap.backups:
+ self.assertTrue(iotests.compare_images(incremental, reference))
+ last = bitmap.last_target()[0]
+ self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))
+
+
+ def hmp_io_writes(self, drive, patterns):
+ for pattern in patterns:
+ self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
+ self.vm.hmp_qemu_io(drive, 'flush')
+
+
+ def do_incremental_simple(self, **kwargs):
+ self.create_anchor_backup()
+ self.add_bitmap('bitmap0', self.drives[0], **kwargs)
+
+ # Sanity: Create a "hollow" incremental backup
+ self.create_incremental()
+ # Three writes: One complete overwrite, one new segment,
+ # and one partial overlap.
+ self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
+ ('0xfe', '16M', '256k'),
+ ('0x64', '32736k', '64k')))
+ self.create_incremental()
+ # Three more writes, one of each kind, like above
+ self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
+ ('0x55', '8M', '352k'),
+ ('0x78', '15872k', '1M')))
+ self.create_incremental()
+ self.vm.shutdown()
+ self.check_backups()
+
+
+ def test_incremental_simple(self):
+ '''
+ Test: Create and verify three incremental backups.
+
+ Create a bitmap and a full backup before VM execution begins,
+ then create a series of three incremental backups "during execution,"
+ i.e., after IO requests begin modifying the drive.
+ '''
+ return self.do_incremental_simple()
+
+
+ def test_small_granularity(self):
+ '''
+ Test: Create and verify backups made with a small granularity bitmap.
+
+ Perform the same test as test_incremental_simple, but with a granularity
+ of only 32KiB instead of the present default of 64KiB.
+ '''
+ return self.do_incremental_simple(granularity=32768)
+
+
+ def test_large_granularity(self):
+ '''
+ Test: Create and verify backups made with a large granularity bitmap.
+
+ Perform the same test as test_incremental_simple, but with a granularity
+ of 128KiB instead of the present default of 64KiB.
+ '''
+ return self.do_incremental_simple(granularity=131072)
+
+
+ def test_incremental_failure(self):
+ '''Test: Verify backups made after a failure are correct.
+
+ Simulate a failure during an incremental backup block job,
+ emulate additional writes, then create another incremental backup
+ afterwards and verify that the backup created is correct.
+ '''
+
+ # Create a blkdebug interface to this img as 'drive1',
+ # but don't actually create a new image.
+ drive1 = self.add_node('drive1', self.drives[0]['fmt'],
+ path=self.drives[0]['file'],
+ backup=self.drives[0]['backup'])
+ result = self.vm.qmp('blockdev-add', options={
+ 'id': drive1['id'],
+ 'driver': drive1['fmt'],
+ 'file': {
+ 'driver': 'blkdebug',
+ 'image': {
+ 'driver': 'file',
+ 'filename': drive1['file']
+ },
+ 'set-state': [{
+ 'event': 'flush_to_disk',
+ 'state': 1,
+ 'new_state': 2
+ }],
+ 'inject-error': [{
+ 'event': 'read_aio',
+ 'errno': 5,
+ 'state': 2,
+ 'immediately': False,
+ 'once': True
+ }],
+ }
+ })
+ self.assert_qmp(result, 'return', {})
+
+ self.create_anchor_backup(self.drives[0])
+ self.add_bitmap('bitmap0', drive1)
+ # Note: in a normal execution, assume that the VM resumes at this point
+ # and begins issuing IO requests.
+
+ self.hmp_io_writes(drive1['id'], (('0xab', 0, 512),
+ ('0xfe', '16M', '256k'),
+ ('0x64', '32736k', '64k')))
+
+ result = self.create_incremental(validate=False)
+ self.assertFalse(result)
+ self.hmp_io_writes(drive1['id'], (('0x9a', 0, 512),
+ ('0x55', '8M', '352k'),
+ ('0x78', '15872k', '1M')))
+ self.create_incremental()
+ self.vm.shutdown()
+ self.check_backups()
+
+
+ def test_sync_dirty_bitmap_missing(self):
+ self.assert_no_active_block_jobs()
+ self.files.append(self.err_img)
+ result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
+ sync='dirty-bitmap', format=self.drives[0]['fmt'],
+ target=self.err_img)
+ self.assert_qmp(result, 'error/class', 'GenericError')
+
+
+ def test_sync_dirty_bitmap_not_found(self):
+ self.assert_no_active_block_jobs()
+ self.files.append(self.err_img)
+ result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
+ sync='dirty-bitmap', bitmap='unknown',
+ format=self.drives[0]['fmt'], target=self.err_img)
+ self.assert_qmp(result, 'error/class', 'GenericError')
+
+
+ def test_sync_dirty_bitmap_bad_granularity(self):
+ '''
+ Test: Test what happens if we provide an improper granularity.
+
+ The granularity must always be a power of 2.
+ '''
+ self.assert_no_active_block_jobs()
+ self.assertRaises(AssertionError, self.add_bitmap,
+ 'bitmap0', self.drives[0],
+ granularity=64000)
+
+
+ def tearDown(self):
+ self.vm.shutdown()
+ for bitmap in self.bitmaps:
+ bitmap.cleanup()
+ for filename in self.files:
+ try_remove(filename)
+
+
+if __name__ == '__main__':
+ iotests.main(supported_fmts=['qcow2'])
diff --git a/tests/qemu-iotests/124.out b/tests/qemu-iotests/124.out
new file mode 100644
index 0000000000..2f7d3902f2
--- /dev/null
+++ b/tests/qemu-iotests/124.out
@@ -0,0 +1,5 @@
+.......
+----------------------------------------------------------------------
+Ran 7 tests
+
+OK
diff --git a/tests/qemu-iotests/129 b/tests/qemu-iotests/129
new file mode 100644
index 0000000000..9e87e1c8d9
--- /dev/null
+++ b/tests/qemu-iotests/129
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+#
+# Tests that "bdrv_drain_all" doesn't drain block jobs
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import iotests
+import time
+
+class TestStopWithBlockJob(iotests.QMPTestCase):
+ test_img = os.path.join(iotests.test_dir, 'test.img')
+ target_img = os.path.join(iotests.test_dir, 'target.img')
+ base_img = os.path.join(iotests.test_dir, 'base.img')
+
+ def setUp(self):
+ iotests.qemu_img('create', '-f', iotests.imgfmt, self.base_img, "1G")
+ iotests.qemu_img('create', '-f', iotests.imgfmt, self.test_img, "-b", self.base_img)
+ iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write -P0x5d 1M 128M', self.test_img)
+ self.vm = iotests.VM().add_drive(self.test_img)
+ self.vm.launch()
+
+ def tearDown(self):
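+ # Undo the I/O throttling that do_test_stop() set up before shutting down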
+ params = {"device": "drive0",
+ "bps": 0,
+ "bps_rd": 0,
+ "bps_wr": 0,
+ "iops": 0,
+ "iops_rd": 0,
+ "iops_wr": 0,
+ }
+ result = self.vm.qmp("block_set_io_throttle", conv_keys=False,
+ **params)
+ self.vm.shutdown()
+
+ def do_test_stop(self, cmd, **args):
+ """Test 'stop' while block job is running on a throttled drive.
+ The 'stop' command shouldn't drain the job"""
+ params = {"device": "drive0",
+ "bps": 1024,
+ "bps_rd": 0,
+ "bps_wr": 0,
+ "iops": 0,
+ "iops_rd": 0,
+ "iops_wr": 0,
+ }
+ result = self.vm.qmp("block_set_io_throttle", conv_keys=False,
+ **params)
+ self.assert_qmp(result, 'return', {})
+ result = self.vm.qmp(cmd, **args)
+ self.assert_qmp(result, 'return', {})
+ result = self.vm.qmp("stop")
+ self.assert_qmp(result, 'return', {})
+ result = self.vm.qmp("query-block-jobs")
+ self.assert_qmp(result, 'return[0]/busy', True)
+ self.assert_qmp(result, 'return[0]/ready', False)
+
+ def test_drive_mirror(self):
+ self.do_test_stop("drive-mirror", device="drive0",
+ target=self.target_img,
+ sync="full")
+
+ def test_drive_backup(self):
+ self.do_test_stop("drive-backup", device="drive0",
+ target=self.target_img,
+ sync="full")
+
+ def test_block_commit(self):
+ self.do_test_stop("block-commit", device="drive0")
+
+if __name__ == '__main__':
+ iotests.main(supported_fmts=["qcow2"])
diff --git a/tests/qemu-iotests/129.out b/tests/qemu-iotests/129.out
new file mode 100644
index 0000000000..8d7e996700
--- /dev/null
+++ b/tests/qemu-iotests/129.out
@@ -0,0 +1,5 @@
+...
+----------------------------------------------------------------------
+Ran 3 tests
+
+OK
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index bcf25786ab..6ca3466ec5 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -122,6 +122,9 @@
115 rw auto
116 rw auto quick
121 rw auto
+122 rw auto
123 rw auto quick
+124 rw auto backing
128 rw auto quick
+129 rw auto quick
130 rw auto quick
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 14028540b3..e93e62387b 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -78,6 +78,23 @@ def create_image(name, size):
i = i + 512
file.close()
+# Test if 'match' is a recursive subset of 'event'
+def event_match(event, match=None):
+ if match is None:
+ return True
+
+ for key in match:
+ if key in event:
+ if isinstance(event[key], dict):
+ if not event_match(event[key], match[key]):
+ return False
+ elif event[key] != match[key]:
+ return False
+ else:
+ return False
+
+ return True
+
class VM(object):
'''A QEMU VM'''
@@ -92,6 +109,7 @@ class VM(object):
'-machine', 'accel=qtest',
'-display', 'none', '-vga', 'none']
self._num_drives = 0
+ self._events = []
# This can be used to add an unused monitor instance.
def add_monitor_telnet(self, ip, port):
@@ -202,14 +220,34 @@ class VM(object):
def get_qmp_event(self, wait=False):
'''Poll for one queued QMP event and return it'''
+ if len(self._events) > 0:
+ return self._events.pop(0)
return self._qmp.pull_event(wait=wait)
def get_qmp_events(self, wait=False):
'''Poll for queued QMP events and return a list of dicts'''
events = self._qmp.get_events(wait=wait)
+ events.extend(self._events)
+ del self._events[:]
self._qmp.clear_events()
return events
+ def event_wait(self, name='BLOCK_JOB_COMPLETED', timeout=60.0, match=None):
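+ # Return the first event named 'name' whose contents match 'match' (see
+ # event_match). Other events seen while polling are queued on self._events
+ # for later get_qmp_event()/get_qmp_events() calls.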
+ # Search cached events
+ for event in self._events:
+ if (event['event'] == name) and event_match(event, match):
+ self._events.remove(event)
+ return event
+
+ # Poll for new events
+ while True:
+ event = self._qmp.pull_event(wait=timeout)
+ if (event['event'] == name) and event_match(event, match):
+ return event
+ self._events.append(event)
+
+ return None
+
index_re = re.compile(r'([^\[]+)\[([^\]]+)\]')
class QMPTestCase(unittest.TestCase):
diff --git a/tests/test-aio.c b/tests/test-aio.c
index a7cb5c9915..4b0cb45d31 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -107,6 +107,7 @@ static void test_notify(void)
typedef struct {
QemuMutex start_lock;
+ EventNotifier notifier;
bool thread_acquired;
} AcquireTestData;
@@ -118,6 +119,8 @@ static void *test_acquire_thread(void *opaque)
qemu_mutex_lock(&data->start_lock);
qemu_mutex_unlock(&data->start_lock);
+ g_usleep(500000);
+ event_notifier_set(&data->notifier);
aio_context_acquire(ctx);
aio_context_release(ctx);
@@ -126,20 +129,19 @@ static void *test_acquire_thread(void *opaque)
return NULL;
}
-static void dummy_notifier_read(EventNotifier *unused)
+static void dummy_notifier_read(EventNotifier *n)
{
- g_assert(false); /* should never be invoked */
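+ /* Now expected to fire: test_acquire_thread kicks us via event_notifier_set() */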
+ event_notifier_test_and_clear(n);
}
static void test_acquire(void)
{
QemuThread thread;
- EventNotifier notifier;
AcquireTestData data;
/* Dummy event notifier ensures aio_poll() will block */
- event_notifier_init(&notifier, false);
- aio_set_event_notifier(ctx, &notifier, dummy_notifier_read);
+ event_notifier_init(&data.notifier, false);
+ aio_set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
qemu_mutex_init(&data.start_lock);
@@ -153,12 +155,13 @@ static void test_acquire(void)
/* Block in aio_poll(), let other thread kick us and acquire context */
aio_context_acquire(ctx);
qemu_mutex_unlock(&data.start_lock); /* let the thread run */
- g_assert(!aio_poll(ctx, true));
+ g_assert(aio_poll(ctx, true));
+ g_assert(!data.thread_acquired);
aio_context_release(ctx);
qemu_thread_join(&thread);
- aio_set_event_notifier(ctx, &notifier, NULL);
- event_notifier_cleanup(&notifier);
+ aio_set_event_notifier(ctx, &data.notifier, NULL);
+ event_notifier_cleanup(&data.notifier);
g_assert(data.thread_acquired);
}
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index 8c902f2055..9f41b5fd2e 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -11,6 +11,8 @@
#include <glib.h>
#include <stdarg.h>
+#include <string.h>
+#include <sys/types.h>
#include "qemu/hbitmap.h"
#define LOG_BITS_PER_LONG (BITS_PER_LONG == 32 ? 5 : 6)
@@ -23,6 +25,7 @@ typedef struct TestHBitmapData {
HBitmap *hb;
unsigned long *bits;
size_t size;
+ size_t old_size;
int granularity;
} TestHBitmapData;
@@ -91,6 +94,44 @@ static void hbitmap_test_init(TestHBitmapData *data,
}
}
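+/* Number of unsigned longs needed to back a bitmap of 'bits' bits (at least 1) */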
+static inline size_t hbitmap_test_array_size(size_t bits)
+{
+ size_t n = (bits + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ return n ? n : 1;
+}
+
+static void hbitmap_test_truncate_impl(TestHBitmapData *data,
+ size_t size)
+{
+ size_t n;
+ size_t m;
+ data->old_size = data->size;
+ data->size = size;
+
+ if (data->size == data->old_size) {
+ return;
+ }
+
+ n = hbitmap_test_array_size(size);
+ m = hbitmap_test_array_size(data->old_size);
+ data->bits = g_realloc(data->bits, sizeof(unsigned long) * n);
+ if (n > m) {
+ memset(&data->bits[m], 0x00, sizeof(unsigned long) * (n - m));
+ }
+
+ /* If we shrink to an uneven multiple of sizeof(unsigned long),
+ * scrub the leftover memory. */
+ if (data->size < data->old_size) {
+ m = size % (sizeof(unsigned long) * 8);
+ if (m) {
+ unsigned long mask = (1ULL << m) - 1;
+ data->bits[n-1] &= mask;
+ }
+ }
+
+ hbitmap_truncate(data->hb, size);
+}
+
static void hbitmap_test_teardown(TestHBitmapData *data,
const void *unused)
{
@@ -369,6 +410,198 @@ static void test_hbitmap_iter_granularity(TestHBitmapData *data,
g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0);
}
+static void hbitmap_test_set_boundary_bits(TestHBitmapData *data, ssize_t diff)
+{
+ size_t size = data->size;
+
+ /* First bit */
+ hbitmap_test_set(data, 0, 1);
+ if (diff < 0) {
+ /* Last bit in new, shortened map */
+ hbitmap_test_set(data, size + diff - 1, 1);
+
+ /* First bit to be truncated away */
+ hbitmap_test_set(data, size + diff, 1);
+ }
+ /* Last bit */
+ hbitmap_test_set(data, size - 1, 1);
+ if (data->granularity == 0) {
+ hbitmap_test_check_get(data);
+ }
+}
+
+static void hbitmap_test_check_boundary_bits(TestHBitmapData *data)
+{
+ size_t size = MIN(data->size, data->old_size);
+
+ if (data->granularity == 0) {
+ hbitmap_test_check_get(data);
+ hbitmap_test_check(data, 0);
+ } else {
+ /* If a granularity was set, note that every distinct
+ * (bit >> granularity) value that was set will increase
+ * the bit pop count by 2^granularity, not just 1.
+ *
+ * The hbitmap_test_check facility does not currently tolerate
+ * non-zero granularities, so test the boundaries and the population
+ * count manually.
+ */
+ g_assert(hbitmap_get(data->hb, 0));
+ g_assert(hbitmap_get(data->hb, size - 1));
+ g_assert_cmpint(2 << data->granularity, ==, hbitmap_count(data->hb));
+ }
+}
+
+/* Generic truncate test. */
+static void hbitmap_test_truncate(TestHBitmapData *data,
+ size_t size,
+ ssize_t diff,
+ int granularity)
+{
+ hbitmap_test_init(data, size, granularity);
+ hbitmap_test_set_boundary_bits(data, diff);
+ hbitmap_test_truncate_impl(data, size + diff);
+ hbitmap_test_check_boundary_bits(data);
+}
+
+static void test_hbitmap_truncate_nop(TestHBitmapData *data,
+ const void *unused)
+{
+ hbitmap_test_truncate(data, L2, 0, 0);
+}
+
+/**
+ * Grow by an amount smaller than the granularity, without crossing
+ * a granularity alignment boundary. Effectively a NOP.
+ */
+static void test_hbitmap_truncate_grow_negligible(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2 - 1;
+ size_t diff = 1;
+ int granularity = 1;
+
+ hbitmap_test_truncate(data, size, diff, granularity);
+}
+
+/**
+ * Shrink by an amount smaller than the granularity, without crossing
+ * a granularity alignment boundary. Effectively a NOP.
+ */
+static void test_hbitmap_truncate_shrink_negligible(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2;
+ ssize_t diff = -1;
+ int granularity = 1;
+
+ hbitmap_test_truncate(data, size, diff, granularity);
+}
+
+/**
+ * Grow by an amount smaller than the granularity, but crossing over
+ * a granularity alignment boundary.
+ */
+static void test_hbitmap_truncate_grow_tiny(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2 - 2;
+ ssize_t diff = 1;
+ int granularity = 1;
+
+ hbitmap_test_truncate(data, size, diff, granularity);
+}
+
+/**
+ * Shrink by an amount smaller than the granularity, but crossing over
+ * a granularity alignment boundary.
+ */
+static void test_hbitmap_truncate_shrink_tiny(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2 - 1;
+ ssize_t diff = -1;
+ int granularity = 1;
+
+ hbitmap_test_truncate(data, size, diff, granularity);
+}
+
+/**
+ * Grow by an amount smaller than sizeof(long), and not crossing over
+ * a sizeof(long) alignment boundary.
+ */
+static void test_hbitmap_truncate_grow_small(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2 + 1;
+ size_t diff = sizeof(long) / 2;
+
+ hbitmap_test_truncate(data, size, diff, 0);
+}
+
+/**
+ * Shrink by an amount smaller than sizeof(long), and not crossing over
+ * a sizeof(long) alignment boundary.
+ */
+static void test_hbitmap_truncate_shrink_small(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2;
+ size_t diff = sizeof(long) / 2;
+
+ hbitmap_test_truncate(data, size, -diff, 0);
+}
+
+/**
+ * Grow by an amount smaller than sizeof(long), while crossing over
+ * a sizeof(long) alignment boundary.
+ */
+static void test_hbitmap_truncate_grow_medium(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2 - 1;
+ size_t diff = sizeof(long) / 2;
+
+ hbitmap_test_truncate(data, size, diff, 0);
+}
+
+/**
+ * Shrink by an amount smaller than sizeof(long), while crossing over
+ * a sizeof(long) alignment boundary.
+ */
+static void test_hbitmap_truncate_shrink_medium(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2 + 1;
+ size_t diff = sizeof(long) / 2;
+
+ hbitmap_test_truncate(data, size, -diff, 0);
+}
+
+/**
+ * Grow by an amount larger than sizeof(long).
+ */
+static void test_hbitmap_truncate_grow_large(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2;
+ size_t diff = 8 * sizeof(long);
+
+ hbitmap_test_truncate(data, size, diff, 0);
+}
+
+/**
+ * Shrink by an amount larger than sizeof(long).
+ */
+static void test_hbitmap_truncate_shrink_large(TestHBitmapData *data,
+ const void *unused)
+{
+ size_t size = L2;
+ size_t diff = 8 * sizeof(long);
+
+ hbitmap_test_truncate(data, size, -diff, 0);
+}
+
static void hbitmap_test_add(const char *testpath,
void (*test_func)(TestHBitmapData *data, const void *user_data))
{
@@ -395,6 +628,28 @@ int main(int argc, char **argv)
hbitmap_test_add("/hbitmap/reset/empty", test_hbitmap_reset_empty);
hbitmap_test_add("/hbitmap/reset/general", test_hbitmap_reset);
hbitmap_test_add("/hbitmap/granularity", test_hbitmap_granularity);
+
+ hbitmap_test_add("/hbitmap/truncate/nop", test_hbitmap_truncate_nop);
+ hbitmap_test_add("/hbitmap/truncate/grow/negligible",
+ test_hbitmap_truncate_grow_negligible);
+ hbitmap_test_add("/hbitmap/truncate/shrink/negligible",
+ test_hbitmap_truncate_shrink_negligible);
+ hbitmap_test_add("/hbitmap/truncate/grow/tiny",
+ test_hbitmap_truncate_grow_tiny);
+ hbitmap_test_add("/hbitmap/truncate/shrink/tiny",
+ test_hbitmap_truncate_shrink_tiny);
+ hbitmap_test_add("/hbitmap/truncate/grow/small",
+ test_hbitmap_truncate_grow_small);
+ hbitmap_test_add("/hbitmap/truncate/shrink/small",
+ test_hbitmap_truncate_shrink_small);
+ hbitmap_test_add("/hbitmap/truncate/grow/medium",
+ test_hbitmap_truncate_grow_medium);
+ hbitmap_test_add("/hbitmap/truncate/shrink/medium",
+ test_hbitmap_truncate_shrink_medium);
+ hbitmap_test_add("/hbitmap/truncate/grow/large",
+ test_hbitmap_truncate_grow_large);
+ hbitmap_test_add("/hbitmap/truncate/shrink/large",
+ test_hbitmap_truncate_shrink_large);
g_test_run();
return 0;