author     Wei Yang <richardw.yang@linux.intel.com>    2019-03-11 13:42:52 +0800
committer  Paolo Bonzini <pbonzini@redhat.com>         2019-03-11 16:51:42 +0100
commit     494d199727ba248c96326b4e1c97f86eb11a5ec7 (patch)
tree       a17cbb6a37f783da806280537e5c2ae4e72ba92f /exec.c
parent     e6c165f364c669b1357f15602ae3bd1d12357135 (diff)
exec.c: refactor function flatview_add_to_dispatch()
flatview_add_to_dispatch() registers pages based on the layout of *section*,
which may look like this:
|s|PPPPPPP|s|
where s stands for subpage and P for page.
The procedure of this function could be described as:
- register first subpage
- register page
- register last subpage
This means the procedure can be simplified into these three explicit steps
instead of iterating in a loop.
This patch refactors the function into three corresponding steps and
adds comments to clarify each of them.
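
As a minimal standalone sketch of the same three-step split, assuming a
hypothetical add_to_dispatch() helper, plain uint64_t ranges and a fixed
4 KiB page size in place of QEMU's MemoryRegionSection, Int128 sizes and
TARGET_PAGE_SIZE:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only: a fixed 4 KiB page stands in for TARGET_PAGE_SIZE and
 * plain uint64_t arithmetic stands in for QEMU's Int128 section sizes. */
#define PAGE_SIZE 4096ULL
#define PAGE_MASK (PAGE_SIZE - 1)

/* Split [addr, addr + size) into an unaligned head subpage, a run of whole
 * pages, and an unaligned tail subpage -- the three steps named above. */
static void add_to_dispatch(uint64_t addr, uint64_t size)
{
    /* register first subpage */
    if (addr & PAGE_MASK) {
        uint64_t left = PAGE_SIZE - (addr & PAGE_MASK);
        uint64_t head = left < size ? left : size;

        printf("subpage:   addr=0x%" PRIx64 " size=0x%" PRIx64 "\n", addr, head);
        if (head == size) {
            return;
        }
        addr += head;
        size -= head;
    }

    /* register whole pages */
    if (size >= PAGE_SIZE) {
        uint64_t body = size & ~PAGE_MASK;

        printf("multipage: addr=0x%" PRIx64 " size=0x%" PRIx64 "\n", addr, body);
        if (body == size) {
            return;
        }
        addr += body;
        size -= body;
    }

    /* register last subpage */
    printf("subpage:   addr=0x%" PRIx64 " size=0x%" PRIx64 "\n", addr, size);
}

int main(void)
{
    /* |s|PPPPPPP|s|: 0x800-byte head, 0x4000 bytes of whole pages, 0x800-byte tail */
    add_to_dispatch(0x1800, 0x5000);
    return 0;
}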
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Message-Id: <20190311054252.6094-1-richardw.yang@linux.intel.com>
[Paolo: move exit before adjustment of remain.offset_within_*,
otherwise int128_get64 fails when a region is 2^64 bytes long]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'exec.c')
-rw-r--r--   exec.c   48
1 file changed, 31 insertions, 17 deletions
@@ -1599,35 +1599,49 @@ static void register_multipage(FlatView *fv,
     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
 }
 
+/*
+ * The range in *section* may look like this:
+ *
+ *      |s|PPPPPPP|s|
+ *
+ * where s stands for subpage and P for page.
+ */
 void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
 {
-    MemoryRegionSection now = *section, remain = *section;
+    MemoryRegionSection remain = *section;
     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
 
-    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
-        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
-                       - now.offset_within_address_space;
+    /* register first subpage */
+    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
+        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
+                        - remain.offset_within_address_space;
+        MemoryRegionSection now = remain;
 
         now.size = int128_min(int128_make64(left), now.size);
         register_subpage(fv, &now);
-    } else {
-        now.size = int128_zero();
-    }
-    while (int128_ne(remain.size, now.size)) {
+        if (int128_eq(remain.size, now.size)) {
+            return;
+        }
         remain.size = int128_sub(remain.size, now.size);
         remain.offset_within_address_space += int128_get64(now.size);
         remain.offset_within_region += int128_get64(now.size);
-        now = remain;
-        if (int128_lt(remain.size, page_size)) {
-            register_subpage(fv, &now);
-        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
-            now.size = page_size;
-            register_subpage(fv, &now);
-        } else {
-            now.size = int128_and(now.size, int128_neg(page_size));
-            register_multipage(fv, &now);
+    }
+
+    /* register whole pages */
+    if (int128_ge(remain.size, page_size)) {
+        MemoryRegionSection now = remain;
+        now.size = int128_and(now.size, int128_neg(page_size));
+        register_multipage(fv, &now);
+        if (int128_eq(remain.size, now.size)) {
+            return;
         }
+        remain.size = int128_sub(remain.size, now.size);
+        remain.offset_within_address_space += int128_get64(now.size);
+        remain.offset_within_region += int128_get64(now.size);
     }
+
+    /* register last subpage */
+    register_subpage(fv, &remain);
 }
 
 void qemu_flush_coalesced_mmio_buffer(void)