author    Juan Quintela <quintela@redhat.com>  2023-05-11 16:12:05 +0200
committer Juan Quintela <quintela@redhat.com>  2023-05-15 10:33:04 +0200
commit    beeda9b7cde6d62cda4700dc4c28e1142690cae2 (patch)
tree      b77098b28c20dc03c6238b0cb7331428f5b7d87f
parent    62c5e181eef9482c081bed2cdcc61c1091a7739c (diff)
Use newly created qemu_target_pages_to_MiB()
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230511141208.17779-3-quintela@redhat.com>
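
For reference, a minimal sketch of the new helper, inferred from the open-coded conversion this patch removes: since pages * page_size equals pages << page_bits, dividing by 2**20 reduces to a single right shift by (20 - page_bits). The real definition is presumably added earlier in this series and exposed via "exec/target_page.h"; the standalone form, the name target_pages_to_MiB_sketch, and the explicit page_bits parameter below are illustrative assumptions, not the upstream signature.

/*
 * Sketch only: convert a count of target pages to MiB (2**20 bytes)
 * without multiplying by the page size first, avoiding any
 * intermediate overflow.
 */
static size_t target_pages_to_MiB_sketch(size_t pages, int page_bits)
{
    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
    assert(page_bits < 20);

    /* pages << page_bits gives bytes; >> 20 gives MiB. */
    return pages >> (20 - page_bits);
}

E.g. with 4 KiB pages (page_bits == 12), target_pages_to_MiB_sketch(256, 12) yields 256 >> 8 == 1 MiB.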
-rw-r--r--  migration/dirtyrate.c | 11
-rw-r--r--  softmmu/dirtylimit.c  | 11
2 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 5bac984fa5..ae52c42c4c 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -16,6 +16,7 @@
 #include "qapi/error.h"
 #include "cpu.h"
 #include "exec/ramblock.h"
+#include "exec/target_page.h"
 #include "exec/ram_addr.h"
 #include "qemu/rcu_queue.h"
 #include "qemu/main-loop.h"
@@ -75,13 +76,11 @@ static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
 static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                       int64_t calc_time_ms)
 {
-    uint64_t memory_size_MB;
     uint64_t increased_dirty_pages =
         dirty_pages.end_pages - dirty_pages.start_pages;
+    uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages);
 
-    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;
-
-    return memory_size_MB * 1000 / calc_time_ms;
+    return memory_size_MiB * 1000 / calc_time_ms;
 }
 
 void global_dirty_log_change(unsigned int flag, bool start)
@@ -292,8 +291,8 @@ static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
     DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
     DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
     /* size of total pages in MB */
-    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
-                                                   TARGET_PAGE_SIZE) >> 20;
+    DirtyStat.page_sampling.total_block_mem_MB +=
+        qemu_target_pages_to_MiB(info->ramblock_pages);
 }
 
 static void update_dirtyrate(uint64_t msec)
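
Worked example for the hunks above, using hypothetical numbers: with 4 KiB target pages, 25600 newly dirtied pages convert to 25600 >> 8 = 100 MiB, so a calc_time_ms of 1000 gives do_calculate_dirtyrate() a result of 100 * 1000 / 1000 = 100 MiB/s.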
diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 71bf6dc7a4..015a9038d1 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -235,20 +235,15 @@ bool dirtylimit_vcpu_index_valid(int cpu_index)
 static uint64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate)
 {
     static uint64_t max_dirtyrate;
-    unsigned target_page_bits = qemu_target_page_bits();
-    uint64_t dirty_ring_size_MB;
+    uint64_t dirty_ring_size_MiB;
 
-    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
-    assert(target_page_bits < 20);
-
-    /* Convert ring size (pages) to MiB (2**20). */
-    dirty_ring_size_MB = kvm_dirty_ring_size() >> (20 - target_page_bits);
+    dirty_ring_size_MiB = qemu_target_pages_to_MiB(kvm_dirty_ring_size());
 
     if (max_dirtyrate < dirtyrate) {
         max_dirtyrate = dirtyrate;
     }
 
-    return dirty_ring_size_MB * 1000000 / max_dirtyrate;
+    return dirty_ring_size_MiB * 1000000 / max_dirtyrate;
 }
 
 static inline bool dirtylimit_done(uint64_t quota,
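
Worked example for dirtylimit_dirty_ring_full_time(), again with hypothetical numbers and assuming kvm_dirty_ring_size() counts ring entries (one page each): a 65536-entry ring at 4 KiB pages converts to 65536 >> 8 = 256 MiB; if the highest dirty rate observed so far is 512 MiB/s, the ring is expected to fill in 256 * 1000000 / 512 = 500000 microseconds, i.e. roughly half a second.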