author | Tim Smith <tim.smith@citrix.com> | 2018-12-12 11:16:26 +0000
---|---|---
committer | Anthony PERARD <anthony.perard@citrix.com> | 2019-01-14 13:45:40 +0000
commit | c6025bd197d0dbcc5067553fd12538d8b29383c2 (patch) |
tree | b4b04c95b9c75162cdf60dbcda51c9ca7563071b /hw/block |
parent | bfd0d6366043935990a69ed4d8183f88772256ee (diff) |
xen-block: avoid repeated memory allocation
The xen-block dataplane currently allocates memory to hold the data for
each request as that request is used, and frees it afterwards. Because
these buffers must be page-aligned, the allocations interact poorly with
ordinary non-page-aligned allocations and balloon the heap.
Instead, allocate the maximum buffer size the protocol can require,
which is BLKIF_MAX_SEGMENTS_PER_REQUEST (currently 11) pages, when the
request structure is created, and keep that buffer until the request is
destroyed. Since requests are re-used via a free list, this should
actually improve memory usage.
Signed-off-by: Tim Smith <tim.smith@citrix.com>
Re-based and commit comment adjusted.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Anthony PERARD <anthony.perard@citrix.com>
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
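To make the lifecycle concrete, here is a minimal standalone C sketch of the allocate-once pattern the patch adopts. It is not the QEMU code: `Request`, `start_request()`, `finish_request()` and `destroy_all()` are invented stand-ins for `XenBlockRequest`, `xen_block_start_request()`, the free-list return path and `xen_block_dataplane_destroy()`, and `PAGE_SIZE`/`MAX_SEGMENTS` stand in for `XC_PAGE_SIZE` and `BLKIF_MAX_SEGMENTS_PER_REQUEST`.

```c
#include <stdlib.h>

#define PAGE_SIZE    4096
#define MAX_SEGMENTS 11   /* mirrors BLKIF_MAX_SEGMENTS_PER_REQUEST */

typedef struct Request {
    void *buf;            /* worst-case buffer, lives as long as the request */
    size_t size;          /* bytes used by the current I/O */
    struct Request *next; /* free-list link */
} Request;

static Request *freelist;

static Request *start_request(void)
{
    Request *req = freelist;

    if (req) {
        freelist = req->next;  /* re-use a request, and with it its buffer */
        req->next = NULL;
    } else {
        req = calloc(1, sizeof(*req));
        if (!req) {
            return NULL;
        }
        /* Allocate the most the protocol can ever need, exactly once. */
        req->buf = aligned_alloc(PAGE_SIZE, MAX_SEGMENTS * PAGE_SIZE);
    }
    return req;
}

static void finish_request(Request *req)
{
    req->size = 0;        /* reset per-I/O state but keep req->buf */
    req->next = freelist;
    freelist = req;
}

static void destroy_all(void)
{
    while (freelist) {
        Request *req = freelist;
        freelist = req->next;
        free(req->buf);   /* the buffer is freed only at teardown */
        free(req);
    }
}

int main(void)
{
    /* Many I/Os, but only one aligned allocation ever happens. */
    for (int i = 0; i < 1000; i++) {
        Request *req = start_request();
        if (!req) {
            return 1;
        }
        req->size = PAGE_SIZE; /* pretend to transfer one page */
        finish_request(req);
    }
    destroy_all();
    return 0;
}
```

The point is that the alloc/free pair moves out of the per-I/O path: a buffer is allocated the first time a request object is created and freed only at teardown, so a thousand I/Os cost one aligned allocation instead of a thousand.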
Diffstat (limited to 'hw/block')
-rw-r--r-- | hw/block/dataplane/xen-block.c | 14
1 file changed, 9 insertions, 5 deletions
```diff
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index 35bfccfba7..d0d8905a33 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -70,7 +70,6 @@ static void reset_request(XenBlockRequest *request)
     memset(&request->req, 0, sizeof(request->req));
     request->status = 0;
     request->start = 0;
-    request->buf = NULL;
     request->size = 0;
     request->presync = 0;
 
@@ -95,6 +94,14 @@ static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
         /* allocate new struct */
         request = g_malloc0(sizeof(*request));
         request->dataplane = dataplane;
+        /*
+         * We cannot need more pages per request than this, and since we
+         * re-use requests, allocate the memory once here. It will be freed
+         * in xen_block_dataplane_destroy() when the request list is freed.
+         */
+        request->buf = qemu_memalign(XC_PAGE_SIZE,
+                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
+                                     XC_PAGE_SIZE);
         dataplane->requests_total++;
         qemu_iovec_init(&request->v, 1);
     } else {
@@ -272,14 +279,12 @@ static void xen_block_complete_aio(void *opaque, int ret)
         if (ret == 0) {
             xen_block_copy_request(request);
         }
-        qemu_vfree(request->buf);
         break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
         if (!request->req.nr_segments) {
             break;
         }
-        qemu_vfree(request->buf);
         break;
     default:
         break;
@@ -360,12 +365,10 @@ static int xen_block_do_aio(XenBlockRequest *request)
 {
     XenBlockDataPlane *dataplane = request->dataplane;
 
-    request->buf = qemu_memalign(XC_PAGE_SIZE, request->size);
     if (request->req.nr_segments &&
         (request->req.operation == BLKIF_OP_WRITE ||
          request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
         xen_block_copy_request(request)) {
-        qemu_vfree(request->buf);
         goto err;
     }
 
@@ -665,6 +668,7 @@ void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
         request = QLIST_FIRST(&dataplane->freelist);
         QLIST_REMOVE(request, list);
         qemu_iovec_destroy(&request->v);
+        qemu_vfree(request->buf);
         g_free(request);
     }
 
```
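For scale (assuming the usual 4 KiB XC_PAGE_SIZE): each request now permanently holds BLKIF_MAX_SEGMENTS_PER_REQUEST × XC_PAGE_SIZE = 11 × 4096 = 45056 bytes, i.e. 44 KiB. Because requests are recycled through the free list rather than freed, the dataplane's buffer footprint settles at requests_total × 44 KiB instead of churning a fresh page-aligned block on every I/O.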