author     Daniel P. Berrangé <berrange@redhat.com>	2022-06-20 12:01:47 +0100
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>	2022-06-22 18:11:21 +0100
commit     246683c22f21df0572bd3ab756ac9e688e856cb1 (patch)
tree       4d31306a516ef66830d0691529c1c41660e294ba /migration
parent     c0e0825c98cf608fe2775395b79c53efe0324f8e (diff)
migration: remove unreachable RDMA code in save_hook impl
The QEMUFile 'save_hook' callback has a 'size_t size' parameter. The RDMA
impl of this has logic that takes different actions depending on whether
the value is zero or non-zero. It has commented out logic that would have
taken further actions if the value was negative.

The only place where the 'save_hook' callback is invoked is the
ram_control_save_page() method, which passes 'size' through from its
caller. The only caller of this method is in turn control_save_page().
This method unconditionally passes the 'TARGET_PAGE_SIZE' constant for
the 'size' parameter.

IOW, the only scenario for 'size' that can execute in the
qemu_rdma_save_page method is 'size > 0'. The remaining code has been
unreachable since RDMA support was first introduced 9 years ago.

Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
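For reference, a minimal standalone sketch of the call chain described above.
The function names (control_save_page, ram_control_save_page,
qemu_rdma_save_page) are the ones named in the commit message, but the
signatures are simplified and the TARGET_PAGE_SIZE value is an assumed
example; the real QEMU prototypes take more parameters.

    /*
     * Illustrative sketch only: simplified signatures, not the real QEMU
     * prototypes.  It shows how 'size' flows from control_save_page() down
     * into the RDMA save_hook, always as the TARGET_PAGE_SIZE constant.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t ram_addr_t;      /* stand-in for QEMU's ram_addr_t */
    #define TARGET_PAGE_SIZE 4096     /* target-dependent in real QEMU */

    /* RDMA 'save_hook' impl: with the call chain below, 'size' is always > 0. */
    static size_t qemu_rdma_save_page(ram_addr_t block_offset, ram_addr_t offset,
                                      size_t size)
    {
        printf("qemu_rdma_save_page: block_offset=%llu offset=%llu size=%zu\n",
               (unsigned long long)block_offset, (unsigned long long)offset, size);
        return size;
    }

    /* Hook dispatcher: passes 'size' through unchanged from its caller. */
    static size_t ram_control_save_page(ram_addr_t block_offset, ram_addr_t offset,
                                        size_t size)
    {
        return qemu_rdma_save_page(block_offset, offset, size);
    }

    /* Sole caller of the dispatcher: unconditionally passes TARGET_PAGE_SIZE. */
    static void control_save_page(ram_addr_t block_offset, ram_addr_t offset)
    {
        (void) ram_control_save_page(block_offset, offset, TARGET_PAGE_SIZE);
    }

    int main(void)
    {
        control_save_page(0, 0);      /* prints size=4096; the 'size == 0' path
                                         (and the commented-out 'size < 0' path)
                                         can never be reached */
        return 0;
    }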
Diffstat (limited to 'migration')
-rw-r--r--	migration/rdma.c	120
1 file changed, 21 insertions, 99 deletions
diff --git a/migration/rdma.c b/migration/rdma.c
index 8504152f39..c5fa4a408a 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1463,34 +1463,6 @@ static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
}
/*
- * Set bit for unregistration in the next iteration.
- * We cannot transmit right here, but will unpin later.
- */
-static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
- uint64_t chunk, uint64_t wr_id)
-{
- if (rdma->unregistrations[rdma->unregister_next] != 0) {
- error_report("rdma migration: queue is full");
- } else {
- RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
-
- if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
- trace_qemu_rdma_signal_unregister_append(chunk,
- rdma->unregister_next);
-
- rdma->unregistrations[rdma->unregister_next++] =
- qemu_rdma_make_wrid(wr_id, index, chunk);
-
- if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
- rdma->unregister_next = 0;
- }
- } else {
- trace_qemu_rdma_signal_unregister_already(chunk);
- }
- }
-}
-
-/*
* Consult the connection manager to see a work request
* (of any kind) has completed.
* Return the work request ID that completed.
@@ -3237,23 +3209,7 @@ qio_channel_rdma_shutdown(QIOChannel *ioc,
* Offset is an offset to be added to block_offset and used
* to also lookup the corresponding RAMBlock.
*
- * @size > 0 :
- * Initiate an transfer this size.
- *
- * @size == 0 :
- * A 'hint' or 'advice' that means that we wish to speculatively
- * and asynchronously unregister this memory. In this case, there is no
- * guarantee that the unregister will actually happen, for example,
- * if the memory is being actively transmitted. Additionally, the memory
- * may be re-registered at any future time if a write within the same
- * chunk was requested again, even if you attempted to unregister it
- * here.
- *
- * @size < 0 : TODO, not yet supported
- * Unregister the memory NOW. This means that the caller does not
- * expect there to be any future RDMA transfers and we just want to clean
- * things up. This is used in case the upper layer owns the memory and
- * cannot wait for qemu_fclose() to occur.
+ * @size : Number of bytes to transfer
*
* @bytes_sent : User-specificed pointer to indicate how many bytes were
* sent. Usually, this will not be more than a few bytes of
@@ -3282,61 +3238,27 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
qemu_fflush(f);
- if (size > 0) {
- /*
- * Add this page to the current 'chunk'. If the chunk
- * is full, or the page doesn't belong to the current chunk,
- * an actual RDMA write will occur and a new chunk will be formed.
- */
- ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
- if (ret < 0) {
- error_report("rdma migration: write error! %d", ret);
- goto err;
- }
-
- /*
- * We always return 1 bytes because the RDMA
- * protocol is completely asynchronous. We do not yet know
- * whether an identified chunk is zero or not because we're
- * waiting for other pages to potentially be merged with
- * the current chunk. So, we have to call qemu_update_position()
- * later on when the actual write occurs.
- */
- if (bytes_sent) {
- *bytes_sent = 1;
- }
- } else {
- uint64_t index, chunk;
-
- /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
- if (size < 0) {
- ret = qemu_rdma_drain_cq(f, rdma);
- if (ret < 0) {
- fprintf(stderr, "rdma: failed to synchronously drain"
- " completion queue before unregistration.\n");
- goto err;
- }
- }
- */
-
- ret = qemu_rdma_search_ram_block(rdma, block_offset,
- offset, size, &index, &chunk);
-
- if (ret) {
- error_report("ram block search failed");
- goto err;
- }
-
- qemu_rdma_signal_unregister(rdma, index, chunk, 0);
+ /*
+ * Add this page to the current 'chunk'. If the chunk
+ * is full, or the page doesn't belong to the current chunk,
+ * an actual RDMA write will occur and a new chunk will be formed.
+ */
+ ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
+ if (ret < 0) {
+ error_report("rdma migration: write error! %d", ret);
+ goto err;
+ }
- /*
- * TODO: Synchronous, guaranteed unregistration (should not occur during
- * fast-path). Otherwise, unregisters will process on the next call to
- * qemu_rdma_drain_cq()
- if (size < 0) {
- qemu_rdma_unregister_waiting(rdma);
- }
- */
+ /*
+ * We always return 1 bytes because the RDMA
+ * protocol is completely asynchronous. We do not yet know
+ * whether an identified chunk is zero or not because we're
+ * waiting for other pages to potentially be merged with
+ * the current chunk. So, we have to call qemu_update_position()
+ * later on when the actual write occurs.
+ */
+ if (bytes_sent) {
+ *bytes_sent = 1;
}
/*