author     Luigi Rizzo <rizzo@iet.unipi.it>          2013-02-05 18:29:09 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>     2013-02-27 16:10:39 +0100
commit     7d91ddd25e3a4e5008a2ac16127d51a34fd56bf1 (patch)
tree       56a3faab8f037bb90724dd20d75a18c33b47fd25
parent     199ee608f0d08510b5c6c37f31a7fbff211d63c4 (diff)
net: fix unbounded NetQueue
In the current implementation of qemu, running without a network backend will cause the queue to grow unbounded when the guest is transmitting traffic.

This patch fixes the problem by implementing a bounded-size NetQueue, with an arbitrary limit of 10000 packets, and dropping packets when the queue is full _and_ the sender does not pass a callback. The second condition ensures that we never drop packets that contain a callback (which would be tricky, because the producer expects the callback to run once all previous packets have been consumed, so we cannot run it when the packet is dropped).

If the documentation is correct, producers that submit a callback should stop sending once their packet is queued, so there is no real risk that the queue exceeds the maximum size by a large amount.

Signed-off-by: Luigi Rizzo <rizzo@iet.unipi.it>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--  net/queue.c  15
1 file changed, 15 insertions, 0 deletions
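As a quick illustration of the drop policy described in the commit message, here is a minimal, self-contained sketch of a bounded enqueue. The names (bounded_queue, bq_packet, bq_append, sent_cb_fn) are hypothetical and not QEMU's NetQueue API; the authoritative change is the diff that follows.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef void (*sent_cb_fn)(void *opaque);   /* completion callback type */

struct bq_packet {
    struct bq_packet *next;
    sent_cb_fn sent_cb;     /* non-NULL means the sender expects a completion */
    size_t size;
    uint8_t data[];
};

struct bounded_queue {
    struct bq_packet *head, *tail;
    uint32_t count;         /* packets currently queued */
    uint32_t maxlen;        /* cap; the patch uses an arbitrary 10000 */
};

/* Enqueue a copy of buf, but drop it when the queue is already full
 * and the sender did not pass a callback, mirroring the patch. */
static bool bq_append(struct bounded_queue *q, const void *buf, size_t size,
                      sent_cb_fn sent_cb)
{
    struct bq_packet *p;

    if (q->count >= q->maxlen && !sent_cb) {
        return false;       /* dropped: full queue, no completion owed */
    }
    p = malloc(sizeof(*p) + size);
    if (!p) {
        return false;
    }
    p->next = NULL;
    p->sent_cb = sent_cb;
    p->size = size;
    memcpy(p->data, buf, size);

    if (q->tail) {
        q->tail->next = p;
    } else {
        q->head = p;
    }
    q->tail = p;
    q->count++;
    return true;
}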
diff --git a/net/queue.c b/net/queue.c
index 6eaf5b63c0..859d02a136 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -50,6 +50,8 @@ struct NetPacket {
 struct NetQueue {
     void *opaque;
+    uint32_t nq_maxlen;
+    uint32_t nq_count;
     QTAILQ_HEAD(packets, NetPacket) packets;
@@ -63,6 +65,8 @@ NetQueue *qemu_new_net_queue(void *opaque)
     queue = g_malloc0(sizeof(NetQueue));
     queue->opaque = opaque;
+    queue->nq_maxlen = 10000;
+    queue->nq_count = 0;
     QTAILQ_INIT(&queue->packets);
@@ -92,6 +96,9 @@ static void qemu_net_queue_append(NetQueue *queue,
 {
     NetPacket *packet;
+    if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
+        return; /* drop if queue full and no callback */
+    }
     packet = g_malloc(sizeof(NetPacket) + size);
     packet->sender = sender;
     packet->flags = flags;
@@ -99,6 +106,7 @@ static void qemu_net_queue_append(NetQueue *queue,
     packet->sent_cb = sent_cb;
     memcpy(packet->data, buf, size);
+    queue->nq_count++;
     QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
 }
@@ -113,6 +121,9 @@ static void qemu_net_queue_append_iov(NetQueue *queue,
     size_t max_len = 0;
     int i;
+    if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
+        return; /* drop if queue full and no callback */
+    }
     for (i = 0; i < iovcnt; i++) {
         max_len += iov[i].iov_len;
     }
@@ -130,6 +141,7 @@ static void qemu_net_queue_append_iov(NetQueue *queue,
         packet->size += len;
     }
+    queue->nq_count++;
     QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
 }
@@ -220,6 +232,7 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
     QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
         if (packet->sender == from) {
             QTAILQ_REMOVE(&queue->packets, packet, entry);
+            queue->nq_count--;
             g_free(packet);
         }
     }
@@ -233,6 +246,7 @@ bool qemu_net_queue_flush(NetQueue *queue)
         packet = QTAILQ_FIRST(&queue->packets);
         QTAILQ_REMOVE(&queue->packets, packet, entry);
+        queue->nq_count--;
         ret = qemu_net_queue_deliver(queue,
                                      packet->sender,
@@ -240,6 +254,7 @@ bool qemu_net_queue_flush(NetQueue *queue)
                                      packet->data,
                                      packet->size);
         if (ret == 0) {
+            queue->nq_count++;
             QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
             return false;
         }
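For completeness, a companion sketch of the flush-side accounting, again using the hypothetical bounded_queue from the sketch above rather than QEMU's API: the counter is decremented when a packet is popped and restored if delivery fails and the packet is pushed back onto the head, in the spirit of the qemu_net_queue_flush() hunk above. The deliver() parameter is an assumed stand-in for qemu_net_queue_deliver(); a return of 0 means the receiver cannot take the packet yet.

/* Drain the queue while keeping the counter in sync with the list. */
static bool bq_flush(struct bounded_queue *q,
                     int (*deliver)(const uint8_t *buf, size_t size))
{
    while (q->head) {
        struct bq_packet *p = q->head;

        /* Pop from the head and account for it immediately. */
        q->head = p->next;
        if (!q->head) {
            q->tail = NULL;
        }
        q->count--;

        if (deliver(p->data, p->size) == 0) {
            /* Receiver not ready: requeue at the head and restore the
             * count, then report that the queue is not yet empty. */
            p->next = q->head;
            q->head = p;
            if (!q->tail) {
                q->tail = p;
            }
            q->count++;
            return false;
        }
        if (p->sent_cb) {
            p->sent_cb(NULL);   /* notify the producer; opaque omitted here */
        }
        free(p);
    }
    return true;
}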