author    Eric Auger <eric.auger@redhat.com>    2020-09-29 10:55:50 +0200
committer Stefan Hajnoczi <stefanha@redhat.com>    2020-10-05 10:59:42 +0100
commit    9ab5741164b1727d22f69fe7001382baf0d56977 (patch)
tree      3975b95109b20b898723acb110061630ec53b257
parent    4487d420d0d0d9b503bfb9308e474f0bbac54700 (diff)
util/vfio-helpers: Rework the IOVA allocator to avoid IOVA reserved regions
Introduce the qemu_vfio_find_fixed/temp_iova helpers, which respectively
allocate IOVAs from the bottom/top parts of the usable IOVA range, without
picking addresses within host IOVA reserved windows. The allocation remains
basic: if the size is too big for the remainder of the current usable IOVA
range, we jump to the next one, leaving a hole in the address map.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20200929085550.30926-3-eric.auger@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--    util/vfio-helpers.c    57
1 file changed, 53 insertions(+), 4 deletions(-)
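To make the search strategy concrete before the diff, here is a minimal,
self-contained C sketch of the bottom-up ("fixed") path. This is not the QEMU
code: IOVARange, Allocator, and find_fixed_iova are hypothetical stand-ins for
the usable_iova_ranges[] array and the water marks kept in QEMUVFIOState. It
shows how a window already below the water mark is skipped, how the mark jumps
to the start of the next usable window, and how a too-small window leaves a
hole in the address map:

#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for QEMUVFIOState's fields; illustrative only. */
typedef struct {
    uint64_t start; /* inclusive */
    uint64_t end;   /* inclusive */
} IOVARange;

typedef struct {
    IOVARange *ranges;        /* sorted, non-overlapping usable windows */
    int nb_ranges;
    uint64_t low_water_mark;  /* fixed mappings grow upward from here */
    uint64_t high_water_mark; /* temporary mappings grow downward (see below) */
} Allocator;

/* Bottom-up allocation: walk the usable windows from low addresses, skip
 * any window entirely below the water mark, and take the first window with
 * enough room. "room == 0" catches the 64-bit wrap when a window ends at
 * UINT64_MAX, mirroring the "+ 1 == 0" test in the patch. */
static int find_fixed_iova(Allocator *a, size_t size, uint64_t *iova)
{
    for (int i = 0; i < a->nb_ranges; i++) {
        if (a->ranges[i].end < a->low_water_mark) {
            continue;
        }
        if (a->ranges[i].start > a->low_water_mark) {
            a->low_water_mark = a->ranges[i].start; /* jump over a hole */
        }
        uint64_t room = a->ranges[i].end - a->low_water_mark + 1;
        if (room >= size || room == 0) {
            *iova = a->low_water_mark;
            a->low_water_mark += size;
            return 0;
        }
    }
    return -ENOMEM; /* no usable window can hold the request */
}

int main(void)
{
    /* Two usable windows separated by a reserved region,
     * e.g. a host MSI window at [0x10000, 0x1ffff]. */
    IOVARange ranges[] = {
        { 0x0,     0xffff  },
        { 0x20000, 0xfffff },
    };
    Allocator a = { ranges, 2, 0x0, 0x100000 };
    uint64_t iova;

    if (!find_fixed_iova(&a, 0x14000, &iova)) {
        /* 0x14000 bytes do not fit below the reserved window, so the
         * allocator jumps to 0x20000, leaving [0, 0xffff] as a hole. */
        printf("allocated at 0x%" PRIx64 "\n", iova);
    }
    return 0;
}

The first request that does not fit simply abandons the rest of the current
window; nothing is ever handed out inside a reserved region.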
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index fe9ca9ce38..c469beb061 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -667,6 +667,50 @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s)
return true;
}
+static int
+qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
+{
+ int i;
+
+ for (i = 0; i < s->nb_iova_ranges; i++) {
+ if (s->usable_iova_ranges[i].end < s->low_water_mark) {
+ continue;
+ }
+ s->low_water_mark =
+ MAX(s->low_water_mark, s->usable_iova_ranges[i].start);
+
+ if (s->usable_iova_ranges[i].end - s->low_water_mark + 1 >= size ||
+ s->usable_iova_ranges[i].end - s->low_water_mark + 1 == 0) {
+ *iova = s->low_water_mark;
+ s->low_water_mark += size;
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+
+static int
+qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
+{
+ int i;
+
+ for (i = s->nb_iova_ranges - 1; i >= 0; i--) {
+ if (s->usable_iova_ranges[i].start > s->high_water_mark) {
+ continue;
+ }
+ s->high_water_mark =
+ MIN(s->high_water_mark, s->usable_iova_ranges[i].end + 1);
+
+ if (s->high_water_mark - s->usable_iova_ranges[i].start + 1 >= size ||
+ s->high_water_mark - s->usable_iova_ranges[i].start + 1 == 0) {
+ *iova = s->high_water_mark - size;
+ s->high_water_mark = *iova;
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+
/* Map [host, host + size) area into a contiguous IOVA address space, and store
* the result in @iova if not NULL. The caller need to make sure the area is
* aligned to page size, and mustn't overlap with existing mapping areas (split
@@ -693,7 +737,11 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
goto out;
}
if (!temporary) {
- iova0 = s->low_water_mark;
+ if (qemu_vfio_find_fixed_iova(s, size, &iova0)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
if (!mapping) {
ret = -ENOMEM;
@@ -705,15 +753,16 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
qemu_vfio_undo_mapping(s, mapping, NULL);
goto out;
}
- s->low_water_mark += size;
qemu_vfio_dump_mappings(s);
} else {
- iova0 = s->high_water_mark - size;
+ if (qemu_vfio_find_temp_iova(s, size, &iova0)) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = qemu_vfio_do_mapping(s, host, size, iova0);
if (ret) {
goto out;
}
- s->high_water_mark -= size;
}
}
if (iova) {
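The temporary path mirrors this from the other end of the address space: it
scans the usable windows from the top and carves mappings downward from the
high water mark. Below is a matching sketch reusing the hypothetical Allocator
above; it is a simplified model of the same search (it assumes the top usable
address is below UINT64_MAX, so end + 1 never wraps, whereas the patch's own
arithmetic also covers that wrap via its "+ 1 == 0" test):

/* Top-down counterpart: windows are scanned from the top, the water mark
 * is pulled down past any hole, and the mapping is carved off just below
 * it. high_water_mark starts one past the highest usable address. */
static int find_temp_iova(Allocator *a, size_t size, uint64_t *iova)
{
    for (int i = a->nb_ranges - 1; i >= 0; i--) {
        if (a->ranges[i].start > a->high_water_mark) {
            continue; /* window lies entirely above the water mark */
        }
        if (a->ranges[i].end + 1 < a->high_water_mark) {
            a->high_water_mark = a->ranges[i].end + 1; /* skip a hole */
        }
        if (a->high_water_mark - a->ranges[i].start >= size) {
            *iova = a->high_water_mark - size;
            a->high_water_mark = *iova;
            return 0;
        }
    }
    return -ENOMEM; /* no usable window can hold the request */
}

With both helpers in place, qemu_vfio_dma_map() no longer bumps the water
marks directly: fixed mappings grow upward from the bottom of the usable
space, temporary mappings grow downward from the top, and neither water mark
can ever land inside a host-reserved window.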