/*
* QEMU Hyper-V Dynamic Memory Protocol driver
*
* Copyright (C) 2020-2023 Oracle and/or its affiliates.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/

#ifndef HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H
#define HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H

#include "exec/memory.h"
#include "qom/object.h"

#include "hv-balloon-page_range_tree.h"

/* OurRange */
#define OUR_RANGE(ptr) ((OurRange *)(ptr))

/* "our range" means the memory range owned by this driver (for hot-adding) */
typedef struct OurRange {
    PageRange range;

    /* How many pages were hot-added to the guest */
    uint64_t added;

    /* Pages at the end not currently usable */
    uint64_t unusable_tail;

    /* Memory removed from the guest */
    PageRangeTree removed_guest, removed_both;
} OurRange;
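
/* First page of the part of our range that hasn't been hot-added yet */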
static inline uint64_t our_range_get_remaining_start(OurRange *our_range)
{
    return our_range->range.start + our_range->added;
}
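
/* Pages still available for hot-adding (excluding the unusable tail) */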
static inline uint64_t our_range_get_remaining_size(OurRange *our_range)
{
    return our_range->range.count - our_range->added -
        our_range->unusable_tail;
}
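
/* Account for additional_size more pages having been hot-added to the guest */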
void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size);
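
/* Mark all pages not yet hot-added as unusable, stopping further hot-add */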
static inline void our_range_mark_remaining_unusable(OurRange *our_range)
{
    our_range->unusable_tail = our_range->range.count - our_range->added;
}
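
/* Pick the removed-ranges tree: both guest and us, or guest only */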
static inline PageRangeTree our_range_get_removed_tree(OurRange *our_range,
                                                       bool both)
{
    if (both) {
        return our_range->removed_both;
    } else {
        return our_range->removed_guest;
    }
}
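
/* Check whether the selected removed-ranges tree (see above) is empty */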
static inline bool our_range_is_removed_tree_empty(OurRange *our_range,
                                                   bool both)
{
    if (both) {
        return page_range_tree_is_empty(our_range->removed_both);
    } else {
        return page_range_tree_is_empty(our_range->removed_guest);
    }
}
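
/* Drop all entries from both removed-ranges trees */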
void hvb_our_range_clear_removed_trees(OurRange *our_range);

/* OurRangeMemslots */
typedef struct OurRangeMemslotsSlots {
    /* Nominal size of each memslot (the last one might be smaller) */
    uint64_t size_each;

    /* Slots array and its element count */
    MemoryRegion *slots;
    unsigned int count;

    /* How many slots are currently mapped */
    unsigned int mapped_count;
} OurRangeMemslotsSlots;

typedef struct OurRangeMemslots {
    OurRange range;

    /* Memslots covering our range */
    OurRangeMemslotsSlots slots;

    MemoryRegion *mr;
} OurRangeMemslots;
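
/*
 * Create a new OurRangeMemslots at guest address addr, with memslot_count
 * memslot MemoryRegions of memslot_size each, backed by backing_mr
 */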
OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size);
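
/* Unmap the memslots and free our_range together with its MemoryRegions */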
void hvb_our_range_memslots_free(OurRangeMemslots *our_range);

G_DEFINE_AUTOPTR_CLEANUP_FUNC(OurRangeMemslots, hvb_our_range_memslots_free)
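
/*
 * Make sure that at least additional_map_size more of the range (beyond the
 * part already mapped) is covered by mapped memslots
 */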
void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size);
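
/*
 * Typical call sequence (a sketch only; "addr", "parent_mr" and the other
 * caller-side variables below are hypothetical, not part of this header):
 *
 *     g_autoptr(OurRangeMemslots) our_range =
 *         hvb_our_range_memslots_new(addr, parent_mr, backing_mr,
 *                                    memslot_owner, memslot_count,
 *                                    memslot_size);
 *
 *     // map enough memslots to back the chunk about to be hot-added ...
 *     hvb_our_range_memslots_ensure_mapped_additional(our_range, map_size);
 *     // ... then account for the pages once the guest accepts them
 *     hvb_our_range_mark_added(&our_range->range, added_pages);
 */
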
#endif