Diffstat (limited to 'util/hbitmap.c')
 util/hbitmap.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 150d6e98b7..a10c7aeeda 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -400,6 +400,54 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity)
     return hb;
 }
 
+void hbitmap_truncate(HBitmap *hb, uint64_t size)
+{
+    bool shrink;
+    unsigned i;
+    uint64_t num_elements = size;
+    uint64_t old;
+
+    /* Size comes in as logical elements, adjust for granularity. */
+    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
+    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
+    shrink = size < hb->size;
+
+    /* bit sizes are identical; nothing to do. */
+    if (size == hb->size) {
+        return;
+    }
+
+    /* If we're losing bits, let's clear those bits before we invalidate all of
+     * our invariants. This helps keep the bitcount consistent, and will prevent
+     * us from carrying around garbage bits beyond the end of the map.
+     */
+    if (shrink) {
+        /* Don't clear partial granularity groups;
+         * start at the first full one. */
+        uint64_t start = QEMU_ALIGN_UP(num_elements, 1ULL << hb->granularity);
+        uint64_t fix_count = (hb->size << hb->granularity) - start;
+
+        assert(fix_count);
+        hbitmap_reset(hb, start, fix_count);
+    }
+
+    hb->size = size;
+    for (i = HBITMAP_LEVELS; i-- > 0; ) {
+        size = MAX(BITS_TO_LONGS(size), 1);
+        if (hb->sizes[i] == size) {
+            break;
+        }
+        old = hb->sizes[i];
+        hb->sizes[i] = size;
+        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
+        if (!shrink) {
+            memset(&hb->levels[i][old], 0x00,
+                   (size - old) * sizeof(*hb->levels[i]));
+        }
+    }
+}
+
+
 /**
  * Given HBitmaps A and B, let A := A (BITOR) B.
  * Bitmap B will not be modified.
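
Not part of the patch above: a minimal usage sketch of the new call, in the spirit of QEMU's hbitmap unit tests, assuming QEMU's build environment and the public API from "qemu/hbitmap.h". The numbers follow from the granularity math in the patch: at granularity 7 one bitmap bit covers 2^7 = 128 elements, so logical sizes round up to whole 128-element groups and counts come back granularity-aligned.

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

int main(void)
{
    /* 1000 logical elements at granularity 7 -> ceil(1000 / 128) = 8 bits. */
    HBitmap *hb = hbitmap_alloc(1000, 7);

    hbitmap_set(hb, 0, 1000);           /* dirty the whole map */
    assert(hbitmap_count(hb) == 1024);  /* 8 bits << 7 = 1024 elements */

    /* Shrink: bits past the new end are cleared via hbitmap_reset()
     * before the levels are reallocated, so the popcount stays sane. */
    hbitmap_truncate(hb, 500);
    assert(hbitmap_count(hb) == 512);   /* 4 bits survive: 512 elements */

    /* Grow: the newly exposed tail is zero-filled by the memset() in
     * the !shrink path, so it starts out clean. */
    hbitmap_truncate(hb, 2000);
    assert(!hbitmap_get(hb, 1500));
    assert(hbitmap_count(hb) == 512);

    hbitmap_free(hb);
    return 0;
}

The ordering in the patch is what makes the shrink assertion hold: dropped bits are cleared while all invariants (and the cached popcount) are still valid, and only then are the per-level arrays reallocated; growth instead relies on the memset in the !shrink branch to hand back a clean tail.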