| | | |
|---|---|---|
| author | Mark McLoughlin <markmc@redhat.com> | 2009-06-17 11:38:28 +0100 |
| committer | Anthony Liguori <aliguori@us.ibm.com> | 2009-06-22 10:10:50 -0500 |
| commit | efeea6d048756bc42ad39f0acce6bede4b74177a | |
| tree | 667e4307b915ca1f6f4711f4a7f6d057e61201a9 /hw/virtio.c | |
| parent | 5774cf98ca7da4161ee4265a2db0d92ffff005f3 | |
virtio: add support for indirect ring entries
Support a new feature flag for indirect ring entries. These are ring
entries which point to a table of buffer descriptors.
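For illustration only (none of the code below is taken from this patch, and the `ex_`-prefixed struct, macros, and helper are invented names): a ring entry with the indirect flag set carries the guest-physical address and byte length of a separate descriptor table, rather than of a data buffer. A minimal sketch, assuming the standard virtio descriptor layout that the patch's `VRingDesc` checks imply:

```c
#include <stdint.h>

/* Descriptor layout assumed here; it mirrors what the patch's
 * vring_desc_addr()/len()/flags() accessors and sizeof(VRingDesc)
 * checks operate on. */
struct ex_vring_desc {
    uint64_t addr;   /* guest-physical address of the buffer (or table) */
    uint32_t len;    /* length of the buffer (or table) in bytes */
    uint16_t flags;  /* NEXT / WRITE / INDIRECT bits */
    uint16_t next;   /* index of the next descriptor when NEXT is set */
};

/* Illustrative flag values; only INDIRECT appears in this patch. */
#define EX_VRING_DESC_F_NEXT     1
#define EX_VRING_DESC_F_WRITE    2
#define EX_VRING_DESC_F_INDIRECT 4

/* Hypothetical helper: make one ring slot refer to an external table of
 * 'count' descriptors instead of to a data buffer. */
static void ex_make_indirect(struct ex_vring_desc *ring_slot,
                             uint64_t table_gpa, unsigned count)
{
    ring_slot->addr  = table_gpa;                                 /* table, not data */
    ring_slot->len   = count * sizeof(struct ex_vring_desc);      /* whole number of
                                                                     descriptors */
    ring_slot->flags = EX_VRING_DESC_F_INDIRECT;
    ring_slot->next  = 0;
}
```

The `len` field being an exact multiple of the descriptor size is what the device-side code added by this patch validates before walking the table.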
The idea is to increase ring capacity by allowing a larger effective
ring size: the ring size then dictates the number of requests that may
be outstanding, rather than the total size of those requests.
This should be most effective for block I/O, where we can benefit from
concurrently dispatching a large number of large requests. Even in the
simple case of single-segment block requests, this results in a
threefold increase in ring capacity.
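A back-of-the-envelope check of that claim, assuming (this is an assumption, not stated in the patch) that a single-segment block request normally consumes three descriptors: a request header, one data segment, and a status byte. The ring size of 128 is likewise just an example:

```c
#include <stdio.h>

int main(void)
{
    const unsigned ring_size = 128;     /* example ring size */
    const unsigned descs_per_req = 3;   /* header + data + status (assumption) */

    /* Direct descriptors: every request occupies descs_per_req ring slots. */
    unsigned direct_capacity = ring_size / descs_per_req;   /* 42 requests */

    /* Indirect descriptors: each request occupies a single ring slot that
     * points to its own 3-entry table, so the ring holds ring_size requests,
     * roughly the threefold increase the commit message refers to. */
    unsigned indirect_capacity = ring_size;                  /* 128 requests */

    printf("direct: %u requests, indirect: %u requests\n",
           direct_capacity, indirect_capacity);
    return 0;
}
```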
Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Diffstat (limited to 'hw/virtio.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | hw/virtio.c | 51 |

1 file changed, 46 insertions, 5 deletions
```diff
diff --git a/hw/virtio.c b/hw/virtio.c
index 1e8376d556..af71d99e4c 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -293,18 +293,41 @@ static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
 
 int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
 {
-    target_phys_addr_t desc_pa = vq->vring.desc;
-    unsigned int idx, max;
-    int num_bufs, in_total, out_total;
+    unsigned int idx;
+    int total_bufs, in_total, out_total;
 
     idx = vq->last_avail_idx;
 
-    max = vq->vring.num;
-    num_bufs = in_total = out_total = 0;
+    total_bufs = in_total = out_total = 0;
     while (virtqueue_num_heads(vq, idx)) {
+        unsigned int max, num_bufs, indirect = 0;
+        target_phys_addr_t desc_pa;
         int i;
 
+        max = vq->vring.num;
+        num_bufs = total_bufs;
         i = virtqueue_get_head(vq, idx++);
+        desc_pa = vq->vring.desc;
+
+        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
+            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
+                fprintf(stderr, "Invalid size for indirect buffer table\n");
+                exit(1);
+            }
+
+            /* If we've got too many, that implies a descriptor loop. */
+            if (num_bufs >= max) {
+                fprintf(stderr, "Looped descriptor");
+                exit(1);
+            }
+
+            /* loop over the indirect descriptor table */
+            indirect = 1;
+            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
+            num_bufs = i = 0;
+            desc_pa = vring_desc_addr(desc_pa, i);
+        }
+
         do {
             /* If we've got too many, that implies a descriptor loop. */
             if (++num_bufs > max) {
@@ -322,6 +345,11 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
                     return 1;
             }
         } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
+
+        if (!indirect)
+            total_bufs = num_bufs;
+        else
+            total_bufs++;
     }
 
     return 0;
@@ -342,6 +370,19 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 
     max = vq->vring.num;
     i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
+
+    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
+        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
+            fprintf(stderr, "Invalid size for indirect buffer table\n");
+            exit(1);
+        }
+
+        /* loop over the indirect descriptor table */
+        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
+        desc_pa = vring_desc_addr(desc_pa, i);
+        i = 0;
+    }
+
     do {
         struct iovec *sg;
         int is_write = 0;
```
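Both hunks apply the same device-side pattern: when the head descriptor has VRING_DESC_F_INDIRECT set, validate that its length is a whole number of descriptors, then restart the walk at index 0 inside the table it points to, with `max` bounded by the table size. Below is a standalone sketch of that pattern under stated assumptions: the `ex_`-prefixed names are inventions for illustration (not QEMU's accessors), and "guest physical addresses" are treated as plain host pointers just to keep the sketch self-contained.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EX_VRING_DESC_F_INDIRECT 4   /* illustrative value */

/* Same shape as the VRingDesc the patch sizes its checks against. */
struct ex_desc {
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
};

/* For this sketch only: read a descriptor by treating the "guest physical
 * address" of the table as a host pointer. */
static struct ex_desc ex_read_desc(uint64_t table_pa, unsigned index)
{
    return ((const struct ex_desc *)(uintptr_t)table_pa)[index];
}

/* If the descriptor at (*desc_pa, *i) is indirect, redirect the walk into
 * the table it points to, mirroring the checks the patch adds. */
static void ex_follow_indirect(uint64_t *desc_pa, unsigned *i, unsigned *max)
{
    struct ex_desc head = ex_read_desc(*desc_pa, *i);

    if (!(head.flags & EX_VRING_DESC_F_INDIRECT)) {
        return;                                  /* ordinary descriptor */
    }
    if (head.len % sizeof(struct ex_desc)) {     /* must hold whole descriptors */
        fprintf(stderr, "Invalid size for indirect buffer table\n");
        exit(1);
    }
    *max = head.len / sizeof(struct ex_desc);    /* chain bounded by table size */
    *desc_pa = head.addr;                        /* continue inside the table... */
    *i = 0;                                      /* ...from its first entry */
}
```

The reset of `i` to 0 and the recomputed `max` are what let the existing `do { ... } while (virtqueue_next_desc(...))` loops in the patch walk the indirect table with no further changes.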