author     Eugenio Pérez <eperezma@redhat.com>    2023-12-21 18:43:13 +0100
committer  Michael S. Tsirkin <mst@redhat.com>    2023-12-26 04:51:07 -0500
commit     a6e823d40eb75110a8a1c6eee9650309412a5e9c
tree       0e29a55f9454760f05e7c758f00551b815879bfe /net
parent     ae25ff41b72366248aa89bdc8be58aa86f67e4c3
vdpa: move shadow_data to vhost_vdpa_shared
Next patches will register the vhost_vdpa memory listener while the VM is
migrating at the destination, so we can map the memory to the device before
stopping the VM at the source. The main goal is to reduce the downtime.

However, the destination QEMU is unaware of which vhost_vdpa device will
register its memory_listener. If the source guest has CVQ enabled, it will
be the CVQ device. Otherwise, it will be the first one.

Move the shadow_data member to VhostVDPAShared so that every vhost_vdpa can
use it, instead of keeping it only in the first or last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20231221174322.3130442-5-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
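For orientation, a condensed sketch of the data layout this patch establishes.
The field names are taken from the diff below; the struct contents are
abbreviated and the real definitions (in include/hw/virtio/vhost-vdpa.h)
carry many more members:

/* Sketch only: state shared by all vhost_vdpa instances of one device */
typedef struct vhost_vdpa_shared {
    /* IOVA range of the device, already tracked per device */
    struct vhost_vdpa_iova_range iova_range;

    /* SVQ decision for the whole device; set once, read by every client */
    bool shadow_data;
} VhostVDPAShared;

struct vhost_vdpa {
    /* per queue pair (or CVQ) switch that actually enables SVQ */
    bool shadow_vqs_enabled;

    /* every vhost_vdpa of the same device points at the same shared state */
    VhostVDPAShared *shared;
};

With the flag living in the shared struct, the CVQ client no longer has to
reach back to the first queue pair's NetClientState to learn whether SVQ is
in use.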
Diffstat (limited to 'net')
-rw-r--r--  net/vhost-vdpa.c | 22
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 7be2c30ad3..bf8e8327da 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -290,15 +290,6 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
     return size;
 }
 
-/** From any vdpa net client, get the netclient of the first queue pair */
-static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
-{
-    NICState *nic = qemu_get_nic(s->nc.peer);
-    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
-
-    return DO_UPCAST(VhostVDPAState, nc, nc0);
-}
-
 static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
 {
     struct vhost_vdpa *v = &s->vhost_vdpa;
@@ -369,13 +360,12 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
     if (s->always_svq ||
         migration_is_setup_or_active(migrate_get_current()->state)) {
         v->shadow_vqs_enabled = true;
-        v->shadow_data = true;
     } else {
         v->shadow_vqs_enabled = false;
-        v->shadow_data = false;
     }
 
     if (v->index == 0) {
+        v->shared->shadow_data = v->shadow_vqs_enabled;
         vhost_vdpa_net_data_start_first(s);
         return 0;
     }
@@ -523,7 +513,7 @@ dma_map_err:
 
 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 {
-    VhostVDPAState *s, *s0;
+    VhostVDPAState *s;
     struct vhost_vdpa *v;
     int64_t cvq_group;
     int r;
@@ -534,12 +524,10 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
     s = DO_UPCAST(VhostVDPAState, nc, nc);
     v = &s->vhost_vdpa;
 
-    s0 = vhost_vdpa_net_first_nc_vdpa(s);
-    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
-    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
+    v->shadow_vqs_enabled = v->shared->shadow_data;
     s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
 
-    if (s->vhost_vdpa.shadow_data) {
+    if (v->shared->shadow_data) {
         /* SVQ is already configured for all virtqueues */
         goto out;
     }
@@ -1688,12 +1676,12 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->always_svq = svq;
     s->migration_state.notify = NULL;
     s->vhost_vdpa.shadow_vqs_enabled = svq;
-    s->vhost_vdpa.shadow_data = svq;
     if (queue_pair_index == 0) {
         vhost_vdpa_net_valid_svq_features(features,
                                           &s->vhost_vdpa.migration_blocker);
         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
         s->vhost_vdpa.shared->iova_range = iova_range;
+        s->vhost_vdpa.shared->shadow_data = svq;
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                      PROT_READ | PROT_WRITE,