author     Peter Maydell <peter.maydell@linaro.org>  2016-10-10 15:19:20 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2016-10-10 15:19:20 +0100
commit     0f183e679d85fec74fc83f35f973cf8e56d97861 (patch)
tree       24ab65457f2b67288502b78e6f179e4ed695171b /block
parent     a20fd901afdb453b8afc578a7be14703c3a36300 (diff)
parent     9c7f3fcae714925153c4c5ada086ca4fcf6bbbd8 (diff)
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block layer patches

# gpg: Signature made Mon 10 Oct 2016 12:33:14 BST
# gpg:                using RSA key 0x7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream:
  dmg: Move libbz2 code to dmg-bz2.so
  module: Don't load the same module if requested multiple times
  scripts: Allow block module to not define BlockDriver
  block: Add qdev ID to DEVICE_TRAY_MOVED
  block-backend: Remember if attached device is non-qdev
  block: Add node name to BLOCK_IO_ERROR event
  block: Add bdrv_runtime_opts to query-command-line-options
  block: use aio_bh_schedule_oneshot
  async: add aio_bh_schedule_oneshot
  block: use bdrv_add_before_write_notifier

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
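Most of the churn below is a mechanical conversion from the manual bottom-half pattern (aio_bh_new() plus qemu_bh_schedule() in the submitter, qemu_bh_delete() in the callback) to the aio_bh_schedule_oneshot() helper introduced by "async: add aio_bh_schedule_oneshot". A minimal sketch of the conversion, using an illustrative callback name rather than one taken from the tree:

    /* Before: the caller keeps the QEMUBH and the callback must delete it. */
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), my_completion_cb, acb);
    qemu_bh_schedule(acb->bh);
    /* ... later, inside my_completion_cb(): qemu_bh_delete(acb->bh); */

    /* After: the one-shot BH is created, scheduled and freed internally,
     * so the QEMUBH pointer can be dropped from the AIOCB entirely. */
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), my_completion_cb, acb);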
Diffstat (limited to 'block')
-rw-r--r--  block/Makefile.objs      3
-rw-r--r--  block/archipelago.c      5
-rw-r--r--  block/blkdebug.c         7
-rw-r--r--  block/blkverify.c        8
-rw-r--r--  block/block-backend.c   89
-rw-r--r--  block/curl.c             7
-rw-r--r--  block/dmg-bz2.c         61
-rw-r--r--  block/dmg.c             69
-rw-r--r--  block/dmg.h             59
-rw-r--r--  block/gluster.c          6
-rw-r--r--  block/io.c              11
-rw-r--r--  block/iscsi.c            7
-rw-r--r--  block/nfs.c              7
-rw-r--r--  block/null.c             5
-rw-r--r--  block/qed.c              6
-rw-r--r--  block/qed.h              1
-rw-r--r--  block/rbd.c              8
-rw-r--r--  block/write-threshold.c  3
18 files changed, 220 insertions(+), 142 deletions(-)
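The dmg patches move the libbz2 dependency into a separate dmg-bz2 block module. The mechanism, visible in the diff below, is a function-pointer hook: dmg.c defines the pointer and treats bzip2 chunks as known only when it is non-NULL, while the module assigns it from a constructor that runs when block_module_load_one("dmg-bz2") loads the shared object. A condensed sketch, taken from the new files below with error handling elided:

    /* block/dmg.c: hook stays NULL when the dmg-bz2 module is absent */
    int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                              char *next_out, unsigned int avail_out);

    /* block/dmg-bz2.c: constructor wires up the hook at module load time */
    __attribute__((constructor))
    static void dmg_bz2_init(void)
    {
        dmg_uncompress_bz2 = dmg_uncompress_bz2_do;
    }

    /* block/dmg.c, dmg_open(): request the module; if it is not available,
     * dmg_uncompress_bz2 remains NULL and bzip2 chunks are rejected. */
    block_module_load_one("dmg-bz2");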
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 7d4031dae5..67a036a1df 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -41,6 +41,7 @@ gluster.o-libs := $(GLUSTERFS_LIBS)
ssh.o-cflags := $(LIBSSH2_CFLAGS)
ssh.o-libs := $(LIBSSH2_LIBS)
archipelago.o-libs := $(ARCHIPELAGO_LIBS)
-dmg.o-libs := $(BZIP2_LIBS)
+block-obj-$(if $(CONFIG_BZIP2),m,n) += dmg-bz2.o
+dmg-bz2.o-libs := $(BZIP2_LIBS)
qcow.o-libs := -lz
linux-aio.o-libs := -laio
diff --git a/block/archipelago.c b/block/archipelago.c
index 37b8aca78d..2449cfc702 100644
--- a/block/archipelago.c
+++ b/block/archipelago.c
@@ -87,7 +87,6 @@ typedef enum {
typedef struct ArchipelagoAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
struct BDRVArchipelagoState *s;
QEMUIOVector *qiov;
ARCHIPCmd cmd;
@@ -154,11 +153,10 @@ static void archipelago_finish_aiocb(AIORequestData *reqdata)
} else if (reqdata->aio_cb->ret == reqdata->segreq->total) {
reqdata->aio_cb->ret = 0;
}
- reqdata->aio_cb->bh = aio_bh_new(
+ aio_bh_schedule_oneshot(
bdrv_get_aio_context(reqdata->aio_cb->common.bs),
qemu_archipelago_complete_aio, reqdata
);
- qemu_bh_schedule(reqdata->aio_cb->bh);
}
static int wait_reply(struct xseg *xseg, xport srcport, struct xseg_port *port,
@@ -313,7 +311,6 @@ static void qemu_archipelago_complete_aio(void *opaque)
AIORequestData *reqdata = (AIORequestData *) opaque;
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
- qemu_bh_delete(aio_cb->bh);
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
aio_cb->status = 0;
diff --git a/block/blkdebug.c b/block/blkdebug.c
index d5db166815..4127571454 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -49,7 +49,6 @@ typedef struct BDRVBlkdebugState {
typedef struct BlkdebugAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
int ret;
} BlkdebugAIOCB;
@@ -410,7 +409,6 @@ out:
static void error_callback_bh(void *opaque)
{
struct BlkdebugAIOCB *acb = opaque;
- qemu_bh_delete(acb->bh);
acb->common.cb(acb->common.opaque, acb->ret);
qemu_aio_unref(acb);
}
@@ -421,7 +419,6 @@ static BlockAIOCB *inject_error(BlockDriverState *bs,
BDRVBlkdebugState *s = bs->opaque;
int error = rule->options.inject.error;
struct BlkdebugAIOCB *acb;
- QEMUBH *bh;
bool immediately = rule->options.inject.immediately;
if (rule->options.inject.once) {
@@ -436,9 +433,7 @@ static BlockAIOCB *inject_error(BlockDriverState *bs,
acb = qemu_aio_get(&blkdebug_aiocb_info, bs, cb, opaque);
acb->ret = -error;
- bh = aio_bh_new(bdrv_get_aio_context(bs), error_callback_bh, acb);
- acb->bh = bh;
- qemu_bh_schedule(bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), error_callback_bh, acb);
return &acb->common;
}
diff --git a/block/blkverify.c b/block/blkverify.c
index da62d75969..28f9af6dba 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -22,7 +22,6 @@ typedef struct {
typedef struct BlkverifyAIOCB BlkverifyAIOCB;
struct BlkverifyAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
/* Request metadata */
bool is_write;
@@ -175,7 +174,6 @@ static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write,
{
BlkverifyAIOCB *acb = qemu_aio_get(&blkverify_aiocb_info, bs, cb, opaque);
- acb->bh = NULL;
acb->is_write = is_write;
acb->sector_num = sector_num;
acb->nb_sectors = nb_sectors;
@@ -191,7 +189,6 @@ static void blkverify_aio_bh(void *opaque)
{
BlkverifyAIOCB *acb = opaque;
- qemu_bh_delete(acb->bh);
if (acb->buf) {
qemu_iovec_destroy(&acb->raw_qiov);
qemu_vfree(acb->buf);
@@ -218,9 +215,8 @@ static void blkverify_aio_cb(void *opaque, int ret)
acb->verify(acb);
}
- acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
- blkverify_aio_bh, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
+ blkverify_aio_bh, acb);
break;
}
}
diff --git a/block/block-backend.c b/block/block-backend.c
index 639294b8e6..1a724a8d89 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -38,6 +38,7 @@ struct BlockBackend {
BlockBackendPublic public;
void *dev; /* attached device model, if any */
+ bool legacy_dev; /* true if dev is not a DeviceState */
/* TODO change to DeviceState when all users are qdevified */
const BlockDevOps *dev_ops;
void *dev_opaque;
@@ -65,7 +66,6 @@ struct BlockBackend {
typedef struct BlockBackendAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
BlockBackend *blk;
int ret;
} BlockBackendAIOCB;
@@ -507,32 +507,38 @@ void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
}
}
-/*
- * Attach device model @dev to @blk.
- * Return 0 on success, -EBUSY when a device model is attached already.
- */
-int blk_attach_dev(BlockBackend *blk, void *dev)
-/* TODO change to DeviceState *dev when all users are qdevified */
+static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
if (blk->dev) {
return -EBUSY;
}
blk_ref(blk);
blk->dev = dev;
+ blk->legacy_dev = false;
blk_iostatus_reset(blk);
return 0;
}
/*
* Attach device model @dev to @blk.
+ * Return 0 on success, -EBUSY when a device model is attached already.
+ */
+int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
+{
+ return blk_do_attach_dev(blk, dev);
+}
+
+/*
+ * Attach device model @dev to @blk.
* @blk must not have a device model attached already.
* TODO qdevified devices don't use this, remove when devices are qdevified
*/
-void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
+void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
- if (blk_attach_dev(blk, dev) < 0) {
+ if (blk_do_attach_dev(blk, dev) < 0) {
abort();
}
+ blk->legacy_dev = true;
}
/*
@@ -559,6 +565,23 @@ void *blk_get_attached_dev(BlockBackend *blk)
return blk->dev;
}
+/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
+ * device attached to the BlockBackend. */
+static char *blk_get_attached_dev_id(BlockBackend *blk)
+{
+ DeviceState *dev;
+
+ assert(!blk->legacy_dev);
+ dev = blk->dev;
+
+ if (!dev) {
+ return g_strdup("");
+ } else if (dev->id) {
+ return g_strdup(dev->id);
+ }
+ return object_get_canonical_path(OBJECT(dev));
+}
+
/*
* Return the BlockBackend which has the device model @dev attached if it
* exists, else null.
@@ -586,6 +609,11 @@ BlockBackend *blk_by_dev(void *dev)
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
void *opaque)
{
+ /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
+ * it that way, so we can assume blk->dev is a DeviceState if blk->dev_ops
+ * is set. */
+ assert(!blk->legacy_dev);
+
blk->dev_ops = ops;
blk->dev_opaque = opaque;
}
@@ -601,13 +629,17 @@ void blk_dev_change_media_cb(BlockBackend *blk, bool load)
if (blk->dev_ops && blk->dev_ops->change_media_cb) {
bool tray_was_open, tray_is_open;
+ assert(!blk->legacy_dev);
+
tray_was_open = blk_dev_is_tray_open(blk);
blk->dev_ops->change_media_cb(blk->dev_opaque, load);
tray_is_open = blk_dev_is_tray_open(blk);
if (tray_was_open != tray_is_open) {
- qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
+ char *id = blk_get_attached_dev_id(blk);
+ qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
&error_abort);
+ g_free(id);
}
}
}
@@ -898,7 +930,6 @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
- qemu_bh_delete(acb->bh);
acb->common.cb(acb->common.opaque, acb->ret);
qemu_aio_unref(acb);
}
@@ -908,16 +939,12 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
void *opaque, int ret)
{
struct BlockBackendAIOCB *acb;
- QEMUBH *bh;
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
acb->blk = blk;
acb->ret = ret;
- bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
- acb->bh = bh;
- qemu_bh_schedule(bh);
-
+ aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
return &acb->common;
}
@@ -926,7 +953,6 @@ typedef struct BlkAioEmAIOCB {
BlkRwCo rwco;
int bytes;
bool has_returned;
- QEMUBH* bh;
} BlkAioEmAIOCB;
static const AIOCBInfo blk_aio_em_aiocb_info = {
@@ -935,10 +961,6 @@ static const AIOCBInfo blk_aio_em_aiocb_info = {
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
- if (acb->bh) {
- assert(acb->has_returned);
- qemu_bh_delete(acb->bh);
- }
if (acb->has_returned) {
acb->common.cb(acb->common.opaque, acb->rwco.ret);
qemu_aio_unref(acb);
@@ -947,7 +969,10 @@ static void blk_aio_complete(BlkAioEmAIOCB *acb)
static void blk_aio_complete_bh(void *opaque)
{
- blk_aio_complete(opaque);
+ BlkAioEmAIOCB *acb = opaque;
+
+ assert(acb->has_returned);
+ blk_aio_complete(acb);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
@@ -967,7 +992,6 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
.ret = NOT_DONE,
};
acb->bytes = bytes;
- acb->bh = NULL;
acb->has_returned = false;
co = qemu_coroutine_create(co_entry, acb);
@@ -975,8 +999,8 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
acb->has_returned = true;
if (acb->rwco.ret != NOT_DONE) {
- acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(blk_get_aio_context(blk),
+ blk_aio_complete_bh, acb);
}
return &acb->common;
@@ -1206,8 +1230,9 @@ static void send_qmp_error_event(BlockBackend *blk,
IoOperationType optype;
optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
- qapi_event_send_block_io_error(blk_name(blk), optype, action,
- blk_iostatus_is_enabled(blk),
+ qapi_event_send_block_io_error(blk_name(blk),
+ bdrv_get_node_name(blk_bs(blk)), optype,
+ action, blk_iostatus_is_enabled(blk),
error == ENOSPC, strerror(error),
&error_abort);
}
@@ -1312,9 +1337,19 @@ void blk_lock_medium(BlockBackend *blk, bool locked)
void blk_eject(BlockBackend *blk, bool eject_flag)
{
BlockDriverState *bs = blk_bs(blk);
+ char *id;
+
+ /* blk_eject is only called by qdevified devices */
+ assert(!blk->legacy_dev);
if (bs) {
bdrv_eject(bs, eject_flag);
+
+ id = blk_get_attached_dev_id(blk);
+ qapi_event_send_device_tray_moved(blk_name(blk), id,
+ eject_flag, &error_abort);
+ g_free(id);
+
}
}
diff --git a/block/curl.c b/block/curl.c
index 571f24cac2..e5eaa7ba0a 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -96,7 +96,6 @@ struct BDRVCURLState;
typedef struct CURLAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
QEMUIOVector *qiov;
int64_t sector_num;
@@ -739,9 +738,6 @@ static void curl_readv_bh_cb(void *p)
CURLAIOCB *acb = p;
BDRVCURLState *s = acb->common.bs->opaque;
- qemu_bh_delete(acb->bh);
- acb->bh = NULL;
-
size_t start = acb->sector_num * SECTOR_SIZE;
size_t end;
@@ -805,8 +801,7 @@ static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
acb->sector_num = sector_num;
acb->nb_sectors = nb_sectors;
- acb->bh = aio_bh_new(bdrv_get_aio_context(bs), curl_readv_bh_cb, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), curl_readv_bh_cb, acb);
return &acb->common;
}
diff --git a/block/dmg-bz2.c b/block/dmg-bz2.c
new file mode 100644
index 0000000000..9059492a9f
--- /dev/null
+++ b/block/dmg-bz2.c
@@ -0,0 +1,61 @@
+/*
+ * DMG bzip2 uncompression
+ *
+ * Copyright (c) 2004 Johannes E. Schindelin
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "dmg.h"
+#include <bzlib.h>
+
+static int dmg_uncompress_bz2_do(char *next_in, unsigned int avail_in,
+ char *next_out, unsigned int avail_out)
+{
+ int ret;
+ uint64_t total_out;
+ bz_stream bzstream = {};
+
+ ret = BZ2_bzDecompressInit(&bzstream, 0, 0);
+ if (ret != BZ_OK) {
+ return -1;
+ }
+ bzstream.next_in = next_in;
+ bzstream.avail_in = avail_in;
+ bzstream.next_out = next_out;
+ bzstream.avail_out = avail_out;
+ ret = BZ2_bzDecompress(&bzstream);
+ total_out = ((uint64_t)bzstream.total_out_hi32 << 32) +
+ bzstream.total_out_lo32;
+ BZ2_bzDecompressEnd(&bzstream);
+ if (ret != BZ_STREAM_END ||
+ total_out != avail_out) {
+ return -1;
+ }
+ return 0;
+}
+
+__attribute__((constructor))
+static void dmg_bz2_init(void)
+{
+ assert(!dmg_uncompress_bz2);
+ dmg_uncompress_bz2 = dmg_uncompress_bz2_do;
+}
diff --git a/block/dmg.c b/block/dmg.c
index b0ed89baa7..58a3ae86c1 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -28,10 +28,10 @@
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include <zlib.h>
-#ifdef CONFIG_BZIP2
-#include <bzlib.h>
-#endif
+#include "dmg.h"
+
+int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
+ char *next_out, unsigned int avail_out);
enum {
/* Limit chunk sizes to prevent unreasonable amounts of memory being used
@@ -41,31 +41,6 @@ enum {
DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};
-typedef struct BDRVDMGState {
- CoMutex lock;
- /* each chunk contains a certain number of sectors,
- * offsets[i] is the offset in the .dmg file,
- * lengths[i] is the length of the compressed chunk,
- * sectors[i] is the sector beginning at offsets[i],
- * sectorcounts[i] is the number of sectors in that chunk,
- * the sectors array is ordered
- * 0<=i<n_chunks */
-
- uint32_t n_chunks;
- uint32_t* types;
- uint64_t* offsets;
- uint64_t* lengths;
- uint64_t* sectors;
- uint64_t* sectorcounts;
- uint32_t current_chunk;
- uint8_t *compressed_chunk;
- uint8_t *uncompressed_chunk;
- z_stream zstream;
-#ifdef CONFIG_BZIP2
- bz_stream bzstream;
-#endif
-} BDRVDMGState;
-
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
int len;
@@ -210,10 +185,9 @@ static bool dmg_is_known_block_type(uint32_t entry_type)
case 0x00000001: /* uncompressed */
case 0x00000002: /* zeroes */
case 0x80000005: /* zlib */
-#ifdef CONFIG_BZIP2
- case 0x80000006: /* bzip2 */
-#endif
return true;
+ case 0x80000006: /* bzip2 */
+ return !!dmg_uncompress_bz2;
default:
return false;
}
@@ -439,6 +413,7 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
int64_t offset;
int ret;
+ block_module_load_one("dmg-bz2");
bs->read_only = true;
s->n_chunks = 0;
@@ -587,9 +562,6 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
int ret;
uint32_t chunk = search_chunk(s, sector_num);
-#ifdef CONFIG_BZIP2
- uint64_t total_out;
-#endif
if (chunk >= s->n_chunks) {
return -1;
@@ -620,8 +592,10 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
return -1;
}
break; }
-#ifdef CONFIG_BZIP2
case 0x80000006: /* bzip2 compressed */
+ if (!dmg_uncompress_bz2) {
+ break;
+ }
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk],
@@ -630,24 +604,15 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
return -1;
}
- ret = BZ2_bzDecompressInit(&s->bzstream, 0, 0);
- if (ret != BZ_OK) {
- return -1;
- }
- s->bzstream.next_in = (char *)s->compressed_chunk;
- s->bzstream.avail_in = (unsigned int) s->lengths[chunk];
- s->bzstream.next_out = (char *)s->uncompressed_chunk;
- s->bzstream.avail_out = (unsigned int) 512 * s->sectorcounts[chunk];
- ret = BZ2_bzDecompress(&s->bzstream);
- total_out = ((uint64_t)s->bzstream.total_out_hi32 << 32) +
- s->bzstream.total_out_lo32;
- BZ2_bzDecompressEnd(&s->bzstream);
- if (ret != BZ_STREAM_END ||
- total_out != 512 * s->sectorcounts[chunk]) {
- return -1;
+ ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
+ (unsigned int) s->lengths[chunk],
+ (char *)s->uncompressed_chunk,
+ (unsigned int)
+ (512 * s->sectorcounts[chunk]));
+ if (ret < 0) {
+ return ret;
}
break;
-#endif /* CONFIG_BZIP2 */
case 1: /* copy */
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->uncompressed_chunk, s->lengths[chunk]);
diff --git a/block/dmg.h b/block/dmg.h
new file mode 100644
index 0000000000..b592d6fa8b
--- /dev/null
+++ b/block/dmg.h
@@ -0,0 +1,59 @@
+/*
+ * Header for DMG driver
+ *
+ * Copyright (c) 2004-2006 Fabrice Bellard
+ * Copyright (c) 2016 Red hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef BLOCK_DMG_H
+#define BLOCK_DMG_H
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "block/block_int.h"
+#include <zlib.h>
+
+typedef struct BDRVDMGState {
+ CoMutex lock;
+ /* each chunk contains a certain number of sectors,
+ * offsets[i] is the offset in the .dmg file,
+ * lengths[i] is the length of the compressed chunk,
+ * sectors[i] is the sector beginning at offsets[i],
+ * sectorcounts[i] is the number of sectors in that chunk,
+ * the sectors array is ordered
+ * 0<=i<n_chunks */
+
+ uint32_t n_chunks;
+ uint32_t *types;
+ uint64_t *offsets;
+ uint64_t *lengths;
+ uint64_t *sectors;
+ uint64_t *sectorcounts;
+ uint32_t current_chunk;
+ uint8_t *compressed_chunk;
+ uint8_t *uncompressed_chunk;
+ z_stream zstream;
+} BDRVDMGState;
+
+extern int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
+ char *next_out, unsigned int avail_out);
+
+#endif
diff --git a/block/gluster.c b/block/gluster.c
index e7bd13cce3..af76d7d59a 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -38,7 +38,6 @@
typedef struct GlusterAIOCB {
int64_t size;
int ret;
- QEMUBH *bh;
Coroutine *coroutine;
AioContext *aio_context;
} GlusterAIOCB;
@@ -622,8 +621,6 @@ static void qemu_gluster_complete_aio(void *opaque)
{
GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
- qemu_bh_delete(acb->bh);
- acb->bh = NULL;
qemu_coroutine_enter(acb->coroutine);
}
@@ -642,8 +639,7 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
acb->ret = -EIO; /* Partial read/write - fail it */
}
- acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
}
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
diff --git a/block/io.c b/block/io.c
index 57a2eeb512..b136c89ae0 100644
--- a/block/io.c
+++ b/block/io.c
@@ -171,7 +171,6 @@ static void bdrv_drain_recurse(BlockDriverState *bs)
typedef struct {
Coroutine *co;
BlockDriverState *bs;
- QEMUBH *bh;
bool done;
} BdrvCoDrainData;
@@ -191,7 +190,6 @@ static void bdrv_co_drain_bh_cb(void *opaque)
BdrvCoDrainData *data = opaque;
Coroutine *co = data->co;
- qemu_bh_delete(data->bh);
bdrv_drain_poll(data->bs);
data->done = true;
qemu_coroutine_enter(co);
@@ -210,9 +208,9 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
.co = qemu_coroutine_self(),
.bs = bs,
.done = false,
- .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
};
- qemu_bh_schedule(data.bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
+ bdrv_co_drain_bh_cb, &data);
qemu_coroutine_yield();
/* If we are resumed from some other event (such as an aio completion or a
@@ -2095,7 +2093,6 @@ typedef struct BlockAIOCBCoroutine {
bool is_write;
bool need_bh;
bool *done;
- QEMUBH* bh;
} BlockAIOCBCoroutine;
static const AIOCBInfo bdrv_em_co_aiocb_info = {
@@ -2115,7 +2112,6 @@ static void bdrv_co_em_bh(void *opaque)
BlockAIOCBCoroutine *acb = opaque;
assert(!acb->need_bh);
- qemu_bh_delete(acb->bh);
bdrv_co_complete(acb);
}
@@ -2125,8 +2121,7 @@ static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
if (acb->req.error != -EINPROGRESS) {
BlockDriverState *bs = acb->common.bs;
- acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
}
}
diff --git a/block/iscsi.c b/block/iscsi.c
index dff548a139..46ddc355ac 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -95,7 +95,6 @@ typedef struct IscsiTask {
int do_retry;
struct scsi_task *task;
Coroutine *co;
- QEMUBH *bh;
IscsiLun *iscsilun;
QEMUTimer retry_timer;
int err_code;
@@ -167,7 +166,6 @@ static void iscsi_co_generic_bh_cb(void *opaque)
{
struct IscsiTask *iTask = opaque;
iTask->complete = 1;
- qemu_bh_delete(iTask->bh);
qemu_coroutine_enter(iTask->co);
}
@@ -299,9 +297,8 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
out:
if (iTask->co) {
- iTask->bh = aio_bh_new(iTask->iscsilun->aio_context,
- iscsi_co_generic_bh_cb, iTask);
- qemu_bh_schedule(iTask->bh);
+ aio_bh_schedule_oneshot(iTask->iscsilun->aio_context,
+ iscsi_co_generic_bh_cb, iTask);
} else {
iTask->complete = 1;
}
diff --git a/block/nfs.c b/block/nfs.c
index 8602a44211..c3db2ec58d 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -57,7 +57,6 @@ typedef struct NFSRPC {
QEMUIOVector *iov;
struct stat *st;
Coroutine *co;
- QEMUBH *bh;
NFSClient *client;
} NFSRPC;
@@ -103,7 +102,6 @@ static void nfs_co_generic_bh_cb(void *opaque)
{
NFSRPC *task = opaque;
task->complete = 1;
- qemu_bh_delete(task->bh);
qemu_coroutine_enter(task->co);
}
@@ -127,9 +125,8 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
error_report("NFS Error: %s", nfs_get_error(nfs));
}
if (task->co) {
- task->bh = aio_bh_new(task->client->aio_context,
- nfs_co_generic_bh_cb, task);
- qemu_bh_schedule(task->bh);
+ aio_bh_schedule_oneshot(task->client->aio_context,
+ nfs_co_generic_bh_cb, task);
} else {
task->complete = 1;
}
diff --git a/block/null.c b/block/null.c
index b511010ba5..b300390944 100644
--- a/block/null.c
+++ b/block/null.c
@@ -124,7 +124,6 @@ static coroutine_fn int null_co_flush(BlockDriverState *bs)
typedef struct {
BlockAIOCB common;
- QEMUBH *bh;
QEMUTimer timer;
} NullAIOCB;
@@ -136,7 +135,6 @@ static void null_bh_cb(void *opaque)
{
NullAIOCB *acb = opaque;
acb->common.cb(acb->common.opaque, 0);
- qemu_bh_delete(acb->bh);
qemu_aio_unref(acb);
}
@@ -164,8 +162,7 @@ static inline BlockAIOCB *null_aio_common(BlockDriverState *bs,
timer_mod_ns(&acb->timer,
qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + s->latency_ns);
} else {
- acb->bh = aio_bh_new(bdrv_get_aio_context(bs), null_bh_cb, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), null_bh_cb, acb);
}
return &acb->common;
}
diff --git a/block/qed.c b/block/qed.c
index 426f3cb447..3ee879b52e 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -909,7 +909,6 @@ static void qed_aio_complete_bh(void *opaque)
void *user_opaque = acb->common.opaque;
int ret = acb->bh_ret;
- qemu_bh_delete(acb->bh);
qemu_aio_unref(acb);
/* Invoke callback */
@@ -934,9 +933,8 @@ static void qed_aio_complete(QEDAIOCB *acb, int ret)
/* Arrange for a bh to invoke the completion function */
acb->bh_ret = ret;
- acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
- qed_aio_complete_bh, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
+ qed_aio_complete_bh, acb);
/* Start next allocating write request waiting behind this one. Note that
* requests enqueue themselves when they first hit an unallocated cluster
diff --git a/block/qed.h b/block/qed.h
index 22b3198751..9676ab9479 100644
--- a/block/qed.h
+++ b/block/qed.h
@@ -130,7 +130,6 @@ enum {
typedef struct QEDAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
int bh_ret; /* final return status for completion bh */
QSIMPLEQ_ENTRY(QEDAIOCB) next; /* next request */
int flags; /* QED_AIOCB_* bits ORed together */
diff --git a/block/rbd.c b/block/rbd.c
index 0106fea45f..6f9eb6fb9c 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -71,7 +71,6 @@ typedef enum {
typedef struct RBDAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
int64_t ret;
QEMUIOVector *qiov;
char *bounce;
@@ -602,7 +601,6 @@ static const AIOCBInfo rbd_aiocb_info = {
static void rbd_finish_bh(void *opaque)
{
RADOSCB *rcb = opaque;
- qemu_bh_delete(rcb->acb->bh);
qemu_rbd_complete_aio(rcb);
}
@@ -621,9 +619,8 @@ static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
rcb->ret = rbd_aio_get_return_value(c);
rbd_aio_release(c);
- acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
- rbd_finish_bh, rcb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
+ rbd_finish_bh, rcb);
}
static int rbd_aio_discard_wrapper(rbd_image_t image,
@@ -679,7 +676,6 @@ static BlockAIOCB *rbd_start_aio(BlockDriverState *bs,
acb->ret = 0;
acb->error = 0;
acb->s = s;
- acb->bh = NULL;
if (cmd == RBD_AIO_WRITE) {
qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
diff --git a/block/write-threshold.c b/block/write-threshold.c
index cc2ca71835..0bd1a01c86 100644
--- a/block/write-threshold.c
+++ b/block/write-threshold.c
@@ -76,8 +76,7 @@ static int coroutine_fn before_write_notify(NotifierWithReturn *notifier,
static void write_threshold_register_notifier(BlockDriverState *bs)
{
bs->write_threshold_notifier.notify = before_write_notify;
- notifier_with_return_list_add(&bs->before_write_notifiers,
- &bs->write_threshold_notifier);
+ bdrv_add_before_write_notifier(bs, &bs->write_threshold_notifier);
}
static void write_threshold_update(BlockDriverState *bs,