author     Akihiko Odaki <akihiko.odaki@daynix.com>    2023-03-24 18:54:32 +0900
committer  Jason Wang <jasowang@redhat.com>            2023-03-28 13:10:55 +0800
commit     f4fdaf009cc85e95a00aba47a6b5b9df920d51c4
tree       2c9c8cd8551f9b8a43afca752813d4caaa03adc6 /hw/net/igb_core.c
parent     212f7b1dac9e4ac344000cc4816097ce2bbe3993
igb: Fix DMA requester specification for Tx packet
igb used to specify the PF as the DMA requester when reading Tx packets. This caused Tx requests from VFs to be performed in the address space of the PF, defeating the purpose of SR-IOV. Add logic to change the requester depending on the queue, which can be assigned to a VF.

Fixes: 3a977deebe ("Intrdocue igb device emulation")
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
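For context, here is a minimal sketch of how a per-queue DMA requester can be selected before being threaded through the Tx path in the patch below. It is not the verbatim QEMU code: it assumes it sits inside hw/net/igb_core.c (so IGBCore, PCIDevice and QEMU's pcie_sriov_get_vf_at_index() are in scope), and the helper name igb_tx_queue_requester() plus the queue-to-VF mapping (queue % 8) are illustrative assumptions.

    /*
     * Illustrative sketch only: choose the PCI function that acts as the
     * DMA requester for a given Tx queue.  With SR-IOV, a queue may belong
     * to a VF, so descriptor and payload reads must use that VF's address
     * space rather than the PF's.  The helper name and the queue-to-VF
     * mapping are assumptions for illustration.
     */
    static PCIDevice *igb_tx_queue_requester(IGBCore *core, int queue_idx)
    {
        /* Assumed mapping: Tx queue N is owned by VF (N % 8) when assigned. */
        PCIDevice *d = pcie_sriov_get_vf_at_index(core->owner, queue_idx % 8);

        if (!d) {
            d = core->owner; /* no VF owns the queue: fall back to the PF */
        }
        return d;
    }

The patch then threads the chosen device through igb_process_tx_desc() and net_tx_pkt_reset(), so Tx DMA is issued on behalf of the queue's owner instead of always the PF.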
Diffstat (limited to 'hw/net/igb_core.c')
 hw/net/igb_core.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 7708333c2a..78d30738e6 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -523,6 +523,7 @@ igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt)
 
 static void
 igb_process_tx_desc(IGBCore *core,
+                    PCIDevice *dev,
                     struct igb_tx *tx,
                     union e1000_adv_tx_desc *tx_desc,
                     int queue_index)
@@ -588,7 +589,7 @@ igb_process_tx_desc(IGBCore *core,
 
         tx->first = true;
         tx->skip_cp = false;
-        net_tx_pkt_reset(tx->tx_pkt);
+        net_tx_pkt_reset(tx->tx_pkt, dev);
     }
 }
 
@@ -803,6 +804,8 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
         d = core->owner;
     }
 
+    net_tx_pkt_reset(txr->tx->tx_pkt, d);
+
     while (!igb_ring_empty(core, txi)) {
         base = igb_ring_head_descr(core, txi);
 
@@ -811,7 +814,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
         trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
                               desc.read.cmd_type_len, desc.wb.status);
 
-        igb_process_tx_desc(core, txr->tx, &desc, txi->idx);
+        igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);
         igb_ring_advance(core, txi, 1);
         eic |= igb_txdesc_writeback(core, base, &desc, txi);
     }
@@ -3828,7 +3831,7 @@ igb_core_pci_realize(IGBCore *core,
     core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
 
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
+        net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS);
     }
 
     net_rx_pkt_init(&core->rx_pkt);
@@ -3853,7 +3856,7 @@ igb_core_pci_uninit(IGBCore *core)
     qemu_del_vm_change_state_handler(core->vmstate);
 
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        net_tx_pkt_reset(core->tx[i].tx_pkt);
+        net_tx_pkt_reset(core->tx[i].tx_pkt, NULL);
         net_tx_pkt_uninit(core->tx[i].tx_pkt);
     }
 
@@ -4026,7 +4029,7 @@ static void igb_reset(IGBCore *core, bool sw)
 
     for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
         tx = &core->tx[i];
-        net_tx_pkt_reset(tx->tx_pkt);
+        net_tx_pkt_reset(tx->tx_pkt, NULL);
         memset(tx->ctx, 0, sizeof(tx->ctx));
         tx->first = true;
         tx->skip_cp = false;