author     Kevin Wolf <kwolf@redhat.com>  2013-12-04 16:43:44 +0100
committer  Kevin Wolf <kwolf@redhat.com>  2014-01-24 17:40:02 +0100
commit     2dbafdc012d3ea81a97fec6226ca82d644539c9a
tree       eed999e7c42d8262fcdedaec8a8ed28962de6859
parent     ec746e10cb2e6276a8d2e036454792fe0674864a
block: Generalise and optimise COR serialisation
Change the API so that specific requests can be marked serialising. Only
these requests are then checked for overlaps.

This means that during a Copy on Read operation, it is no longer the case
that all overlapping requests are serialised; only those that actually
overlap with the specific COR request are.

Also remove COR from function and variable names, because this
functionality can be useful in other contexts.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Benoit Canet <benoit@irqsave.net>
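
To make the new convention concrete, here is a minimal, self-contained C sketch of the mechanism described above: each tracked request carries a serialising flag, the device keeps a counter of serialising requests in flight, and two requests conflict only if their ranges overlap and at least one of them is serialising. The type and function names mirror the patch, but this is an illustrative model (no coroutines, no QLIST, no cluster rounding), not the QEMU code itself.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for BlockDriverState and BdrvTrackedRequest. */
typedef struct BlockState {
    unsigned int serialising_in_flight;   /* serialising requests in flight */
} BlockState;

typedef struct TrackedRequest {
    BlockState   *bs;
    int64_t       offset;
    unsigned int  bytes;
    bool          serialising;
} TrackedRequest;

/* Mark a request serialising at most once and bump the per-device counter,
 * mirroring mark_request_serialising() in the patch. */
static void mark_request_serialising(TrackedRequest *req)
{
    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }
}

/* Two requests conflict only if at least one of them is serialising and
 * their byte ranges overlap; unrelated non-serialising requests never
 * wait for each other. */
static bool requests_conflict(const TrackedRequest *self,
                              const TrackedRequest *other)
{
    if (other == self || (!other->serialising && !self->serialising)) {
        return false;
    }
    return self->offset < other->offset + other->bytes &&
           other->offset < self->offset + self->bytes;
}

In the read path after this patch, bdrv_aligned_preadv() marks the request serialising only when BDRV_REQ_COPY_ON_READ is set and then calls wait_serialising_requests(req) unconditionally; that call returns immediately while serialising_in_flight is zero, so the common case pays only a counter check.
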
-rw-r--r--   block.c                    | 48
-rw-r--r--   include/block/block_int.h  |  5
2 files changed, 32 insertions(+), 21 deletions(-)
diff --git a/block.c b/block.c
--- a/block.c
+++ b/block.c
@@ -2208,6 +2208,10 @@ int bdrv_commit_all(void)
  */
 static void tracked_request_end(BdrvTrackedRequest *req)
 {
+    if (req->serialising) {
+        req->bs->serialising_in_flight--;
+    }
+
     QLIST_REMOVE(req, list);
     qemu_co_queue_restart_all(&req->wait_queue);
 }
@@ -2222,10 +2226,11 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
 {
     *req = (BdrvTrackedRequest){
         .bs = bs,
-        .offset = offset,
-        .bytes = bytes,
-        .is_write = is_write,
-        .co = qemu_coroutine_self(),
+        .offset         = offset,
+        .bytes          = bytes,
+        .is_write       = is_write,
+        .co             = qemu_coroutine_self(),
+        .serialising    = false,
     };
 
     qemu_co_queue_init(&req->wait_queue);
@@ -2233,6 +2238,14 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
 }
 
+static void mark_request_serialising(BdrvTrackedRequest *req)
+{
+    if (!req->serialising) {
+        req->bs->serialising_in_flight++;
+        req->serialising = true;
+    }
+}
+
 /**
  * Round a region to cluster boundaries
  */
@@ -2285,26 +2298,31 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
     return true;
 }
 
-static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
-        BdrvTrackedRequest *self, int64_t offset, unsigned int bytes)
+static void coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
 {
+    BlockDriverState *bs = self->bs;
     BdrvTrackedRequest *req;
     int64_t cluster_offset;
     unsigned int cluster_bytes;
     bool retry;
 
+    if (!bs->serialising_in_flight) {
+        return;
+    }
+
     /* If we touch the same cluster it counts as an overlap. This guarantees
      * that allocating writes will be serialized and not race with each other
      * for the same cluster. For example, in copy-on-read it ensures that the
      * CoR read and write operations are atomic and guest writes cannot
      * interleave between them.
      */
-    round_bytes_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
+    round_bytes_to_clusters(bs, self->offset, self->bytes,
+                            &cluster_offset, &cluster_bytes);
 
     do {
         retry = false;
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
-            if (req == self) {
+            if (req == self || (!req->serialising && !self->serialising)) {
                 continue;
             }
             if (tracked_request_overlaps(req, cluster_offset, cluster_bytes)) {
@@ -2923,12 +2941,10 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
 
     /* Handle Copy on Read and associated serialisation */
     if (flags & BDRV_REQ_COPY_ON_READ) {
-        bs->copy_on_read_in_flight++;
+        mark_request_serialising(req);
     }
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, req, offset, bytes);
-    }
+    wait_serialising_requests(req);
 
     if (flags & BDRV_REQ_COPY_ON_READ) {
         int pnum;
@@ -2977,10 +2993,6 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
     }
 
 out:
-    if (flags & BDRV_REQ_COPY_ON_READ) {
-        bs->copy_on_read_in_flight--;
-    }
-
     return ret;
 }
 
@@ -3179,9 +3191,7 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, req, offset, bytes);
-    }
+    wait_serialising_requests(req);
 
     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
 
diff --git a/include/block/block_int.h b/include/block/block_int.h
index bcdd98c503..c1153cb3ab 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -60,6 +60,7 @@ typedef struct BdrvTrackedRequest {
     int64_t offset;
     unsigned int bytes;
     bool is_write;
+    bool serialising;
     QLIST_ENTRY(BdrvTrackedRequest) list;
     Coroutine *co; /* owner, used for deadlock detection */
     CoQueue wait_queue; /* coroutines blocked on this request */
@@ -302,8 +303,8 @@ struct BlockDriverState {
     /* Callback before write request is processed */
     NotifierWithReturnList before_write_notifiers;
 
-    /* number of in-flight copy-on-read requests */
-    unsigned int copy_on_read_in_flight;
+    /* number of in-flight serialising requests */
+    unsigned int serialising_in_flight;
 
     /* I/O throttling */
     ThrottleState throttle_state;
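
As the comment in wait_serialising_requests() notes, the overlap check is done on cluster-aligned ranges, so merely touching the same cluster already counts as an overlap. A rough sketch of that rounding is shown below; here the cluster size is passed in directly and assumed to be a power of two, whereas the round_bytes_to_clusters() call in the diff takes the BlockDriverState and the request's offset and bytes. Illustrative only.

#include <stdint.h>

/* Expand [offset, offset + bytes) outward to cluster boundaries so that two
 * requests touching the same cluster compare as overlapping. Assumes
 * cluster_size is a power of two. */
static void round_range_to_clusters(int64_t offset, unsigned int bytes,
                                    int64_t cluster_size,
                                    int64_t *cluster_offset,
                                    unsigned int *cluster_bytes)
{
    int64_t end = offset + bytes;

    *cluster_offset = offset & ~(cluster_size - 1);              /* round start down */
    *cluster_bytes  = (unsigned int)(((end + cluster_size - 1)
                                      & ~(cluster_size - 1)) - *cluster_offset);
}

For example, with a 64 KiB cluster size a 512-byte request at byte offset 65,636 rounds out to the cluster range [65536, 131072), so it is treated as conflicting with a copy-on-read request that is populating the same cluster.
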