Diffstat (limited to 'util'):
 util/aio-posix.c | 90
 util/aio-win32.c | 67
 util/hbitmap.c   | 76
 3 files changed, 148 insertions(+), 85 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 51c41ed3c9..8640dfde9f 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -200,6 +200,31 @@ static AioHandler *find_aio_handler(AioContext *ctx, int fd)
return NULL;
}
+static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+{
+ /* If the GSource is in the process of being destroyed then
+ * g_source_remove_poll() causes an assertion failure. Skip
+ * removal in that case, because glib cleans up its state during
+ * destruction anyway.
+ */
+ if (!g_source_is_destroyed(&ctx->source)) {
+ g_source_remove_poll(&ctx->source, &node->pfd);
+ }
+
+ /* If a read is in progress, just mark the node as deleted */
+ if (qemu_lockcnt_count(&ctx->list_lock)) {
+ node->deleted = 1;
+ node->pfd.revents = 0;
+ return false;
+ }
+ /* Otherwise, delete it for real. We can't just mark it as
+ * deleted because deleted nodes are only cleaned up while
+ * no one is walking the handlers list.
+ */
+ QLIST_REMOVE(node, node);
+ return true;
+}
+
void aio_set_fd_handler(AioContext *ctx,
int fd,
bool is_external,
@@ -209,6 +234,7 @@ void aio_set_fd_handler(AioContext *ctx,
void *opaque)
{
AioHandler *node;
+ AioHandler *new_node = NULL;
bool is_new = false;
bool deleted = false;
int poll_disable_change;
@@ -223,50 +249,39 @@ void aio_set_fd_handler(AioContext *ctx,
qemu_lockcnt_unlock(&ctx->list_lock);
return;
}
- /* If the GSource is in the process of being destroyed then
- * g_source_remove_poll() causes an assertion failure. Skip
- * removal in that case, because glib cleans up its state during
- * destruction anyway.
- */
- if (!g_source_is_destroyed(&ctx->source)) {
- g_source_remove_poll(&ctx->source, &node->pfd);
- }
-
- /* If a read is in progress, just mark the node as deleted */
- if (qemu_lockcnt_count(&ctx->list_lock)) {
- node->deleted = 1;
- node->pfd.revents = 0;
- } else {
- /* Otherwise, delete it for real. We can't just mark it as
- * deleted because deleted nodes are only cleaned up while
- * no one is walking the handlers list.
- */
- QLIST_REMOVE(node, node);
- deleted = true;
- }
+ /* Clear events in order to unregister the fd from the ctx epoll. */
+ node->pfd.events = 0;
poll_disable_change = -!node->io_poll;
} else {
poll_disable_change = !io_poll - (node && !node->io_poll);
if (node == NULL) {
- /* Alloc and insert if it's not already there */
- node = g_new0(AioHandler, 1);
- node->pfd.fd = fd;
- QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
-
- g_source_add_poll(&ctx->source, &node->pfd);
is_new = true;
}
+ /* Alloc and insert if it's not already there */
+ new_node = g_new0(AioHandler, 1);
/* Update handler with latest information */
- node->io_read = io_read;
- node->io_write = io_write;
- node->io_poll = io_poll;
- node->opaque = opaque;
- node->is_external = is_external;
+ new_node->io_read = io_read;
+ new_node->io_write = io_write;
+ new_node->io_poll = io_poll;
+ new_node->opaque = opaque;
+ new_node->is_external = is_external;
+
+ if (is_new) {
+ new_node->pfd.fd = fd;
+ } else {
+ new_node->pfd = node->pfd;
+ }
+ g_source_add_poll(&ctx->source, &new_node->pfd);
+
- node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
- node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
+ new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
+ new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
+ QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
+ }
+ if (node) {
+ deleted = aio_remove_fd_handler(ctx, node);
}
/* No need to order poll_disable_cnt writes against other updates;
@@ -278,7 +293,12 @@ void aio_set_fd_handler(AioContext *ctx,
atomic_set(&ctx->poll_disable_cnt,
atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
- aio_epoll_update(ctx, node, is_new);
+ if (new_node) {
+ aio_epoll_update(ctx, new_node, is_new);
+ } else if (node) {
+ /* Unregister deleted fd_handler */
+ aio_epoll_update(ctx, node, false);
+ }
qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
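
The aio-posix.c change rests on one invariant: a handler node may only be unlinked and freed while no other thread is walking ctx->aio_handlers, which QEMU tracks through the list_lock lockcnt queried with qemu_lockcnt_count(). Below is a minimal, self-contained sketch of that deferred-deletion idea, not QEMU code: Node, walkers, handlers, and remove_handler() are illustrative stand-ins, and a plain counter replaces the atomic lockcnt.

    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct Node {
        struct Node *next;
        bool deleted;        /* marked instead of freed while walkers > 0 */
    } Node;

    static int walkers;      /* stand-in for qemu_lockcnt_count(&ctx->list_lock) */
    static Node *handlers;   /* stand-in for ctx->aio_handlers */

    /* Same shape as aio_remove_fd_handler(): defer the unlink while
     * someone may still be iterating, otherwise delete it for real. */
    static bool remove_handler(Node **prev_link, Node *node)
    {
        if (walkers > 0) {
            node->deleted = true;   /* reaped later by the last walker */
            return false;
        }
        *prev_link = node->next;
        free(node);
        return true;
    }
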
diff --git a/util/aio-win32.c b/util/aio-win32.c
index c58957cc4b..a23b9c364d 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -35,6 +35,22 @@ struct AioHandler {
QLIST_ENTRY(AioHandler) node;
};
+static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+{
+ /* If aio_poll is in progress, just mark the node as deleted */
+ if (qemu_lockcnt_count(&ctx->list_lock)) {
+ node->deleted = 1;
+ node->pfd.revents = 0;
+ } else {
+ /* Otherwise, delete it for real. We can't just mark it as
+ * deleted because deleted nodes are only cleaned up after
+ * releasing the list_lock.
+ */
+ QLIST_REMOVE(node, node);
+ g_free(node);
+ }
+}
+
void aio_set_fd_handler(AioContext *ctx,
int fd,
bool is_external,
@@ -44,41 +60,23 @@ void aio_set_fd_handler(AioContext *ctx,
void *opaque)
{
/* fd is a SOCKET in our case */
- AioHandler *node;
+ AioHandler *old_node;
+ AioHandler *node = NULL;
qemu_lockcnt_lock(&ctx->list_lock);
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
- if (node->pfd.fd == fd && !node->deleted) {
+ QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
+ if (old_node->pfd.fd == fd && !old_node->deleted) {
break;
}
}
- /* Are we deleting the fd handler? */
- if (!io_read && !io_write) {
- if (node) {
- /* If aio_poll is in progress, just mark the node as deleted */
- if (qemu_lockcnt_count(&ctx->list_lock)) {
- node->deleted = 1;
- node->pfd.revents = 0;
- } else {
- /* Otherwise, delete it for real. We can't just mark it as
- * deleted because deleted nodes are only cleaned up after
- * releasing the list_lock.
- */
- QLIST_REMOVE(node, node);
- g_free(node);
- }
- }
- } else {
+ if (io_read || io_write) {
HANDLE event;
long bitmask = 0;
- if (node == NULL) {
- /* Alloc and insert if it's not already there */
- node = g_new0(AioHandler, 1);
- node->pfd.fd = fd;
- QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
- }
+ /* Alloc and insert if it's not already there */
+ node = g_new0(AioHandler, 1);
+ node->pfd.fd = fd;
node->pfd.events = 0;
if (node->io_read) {
@@ -104,9 +102,13 @@ void aio_set_fd_handler(AioContext *ctx,
bitmask |= FD_WRITE | FD_CONNECT;
}
+ QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
event = event_notifier_get_handle(&ctx->notifier);
WSAEventSelect(node->pfd.fd, event, bitmask);
}
+ if (old_node) {
+ aio_remove_fd_handler(ctx, old_node);
+ }
qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
@@ -139,18 +141,7 @@ void aio_set_event_notifier(AioContext *ctx,
if (node) {
g_source_remove_poll(&ctx->source, &node->pfd);
- /* aio_poll is in progress, just mark the node as deleted */
- if (qemu_lockcnt_count(&ctx->list_lock)) {
- node->deleted = 1;
- node->pfd.revents = 0;
- } else {
- /* Otherwise, delete it for real. We can't just mark it as
- * deleted because deleted nodes are only cleaned up after
- * releasing the list_lock.
- */
- QLIST_REMOVE(node, node);
- g_free(node);
- }
+ aio_remove_fd_handler(ctx, node);
}
} else {
if (node == NULL) {
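
aio-win32.c applies the same two-phase removal, and it also shows why both files insert the replacement node with QLIST_INSERT_HEAD_RCU before removing the old one: a concurrent walker sees either the old node (soon marked deleted) or the new one, never a gap for the fd. Continuing the sketch above, the reader side below is what makes marking nodes deleted safe; in QEMU the counting is done atomically with qemu_lockcnt_inc() and qemu_lockcnt_dec() on ctx->list_lock, so the plain counter here is single-threaded shorthand only.

    /* Walk the list, then let the last walker out reap every node that
     * was marked deleted in the meantime. */
    static void walk_handlers(void (*cb)(Node *))
    {
        walkers++;                         /* qemu_lockcnt_inc() in QEMU */
        for (Node *n = handlers; n != NULL; n = n->next) {
            if (!n->deleted) {
                cb(n);                     /* dispatch only live handlers */
            }
        }
        if (--walkers == 0) {              /* qemu_lockcnt_dec() in QEMU */
            for (Node **p = &handlers; *p != NULL; ) {
                Node *n = *p;
                if (n->deleted) {
                    *p = n->next;          /* unlink and free deferred nodes */
                    free(n);
                } else {
                    p = &n->next;
                }
            }
        }
    }
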
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 8d402c59d9..7905212a8b 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -53,6 +53,9 @@
*/
struct HBitmap {
+ /* Size of the bitmap, as requested in hbitmap_alloc. */
+ uint64_t orig_size;
+
/* Number of total bits in the bottom level. */
uint64_t size;
@@ -141,7 +144,7 @@ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
return cur;
}
-int64_t hbitmap_iter_next(HBitmapIter *hbi, bool advance)
+int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
@@ -154,12 +157,8 @@ int64_t hbitmap_iter_next(HBitmapIter *hbi, bool advance)
}
}
- if (advance) {
- /* The next call will resume work from the next bit. */
- hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
- } else {
- hbi->cur[HBITMAP_LEVELS - 1] = cur;
- }
+ /* The next call will resume work from the next bit. */
+ hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);
return item << hbi->granularity;
@@ -192,16 +191,28 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
}
}
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
{
size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
- uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
unsigned long cur = last_lev[pos];
- unsigned start_bit_offset =
- (start >> hb->granularity) & (BITS_PER_LONG - 1);
+ unsigned start_bit_offset;
+ uint64_t end_bit, sz;
int64_t res;
+ if (start >= hb->orig_size || count == 0) {
+ return -1;
+ }
+
+ end_bit = count > hb->orig_size - start ?
+ hb->size :
+ ((start + count - 1) >> hb->granularity) + 1;
+ sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;
+
+ /* There may be some zero bits in @cur before @start. We are not
+ * interested in them, so set them.
+ */
+ start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
cur |= (1UL << start_bit_offset) - 1;
assert((start >> hb->granularity) < hb->size);
@@ -218,7 +229,7 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
}
res = (pos << BITS_PER_LEVEL) + ctol(cur);
- if (res >= hb->size) {
+ if (res >= end_bit) {
return -1;
}
@@ -231,6 +242,45 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
return res;
}
+bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
+ uint64_t *count)
+{
+ HBitmapIter hbi;
+ int64_t first_dirty_off, area_end;
+ uint32_t granularity = 1UL << hb->granularity;
+ uint64_t end;
+
+ if (*start >= hb->orig_size || *count == 0) {
+ return false;
+ }
+
+ end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count;
+
+ hbitmap_iter_init(&hbi, hb, *start);
+ first_dirty_off = hbitmap_iter_next(&hbi);
+
+ if (first_dirty_off < 0 || first_dirty_off >= end) {
+ return false;
+ }
+
+ if (first_dirty_off + granularity >= end) {
+ area_end = end;
+ } else {
+ area_end = hbitmap_next_zero(hb, first_dirty_off + granularity,
+ end - first_dirty_off - granularity);
+ if (area_end < 0) {
+ area_end = end;
+ }
+ }
+
+ if (first_dirty_off > *start) {
+ *start = first_dirty_off;
+ }
+ *count = area_end - *start;
+
+ return true;
+}
+
bool hbitmap_empty(const HBitmap *hb)
{
return hb->count == 0;
@@ -652,6 +702,8 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity)
HBitmap *hb = g_new0(struct HBitmap, 1);
unsigned i;
+ hb->orig_size = size;
+
assert(granularity >= 0 && granularity < 64);
size = (size + (1ULL << granularity) - 1) >> granularity;
assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
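
From the code above, the contract of the new call is: hbitmap_next_dirty_area() takes a search window in *start and *count, returns false if the window contains no dirty bits, and otherwise narrows *start and *count to the first contiguous dirty area, whose end hbitmap_next_zero() locates. A plausible consumer loop, sketched under the assumption of a bitmap covering disk_size bytes and a hypothetical backup_chunk() callback, looks like this:

    #include <stdint.h>
    #include "qemu/hbitmap.h"

    void backup_chunk(uint64_t off, uint64_t len);    /* hypothetical consumer */

    void backup_dirty_areas(HBitmap *bitmap, uint64_t disk_size)
    {
        uint64_t off = 0;
        uint64_t len = disk_size;          /* start with the whole bitmap */

        while (hbitmap_next_dirty_area(bitmap, &off, &len)) {
            /* [off, off + len) is now one contiguous dirty area */
            backup_chunk(off, len);

            off += len;                    /* resume right after that area */
            len = disk_size - off;         /* len == 0 ends the loop */
        }
    }

Each iteration shrinks the window to whatever is left of the bitmap, so the loop terminates once *start reaches orig_size or no dirty bits remain.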