author     Avi Kivity <avi@redhat.com>   2012-02-29 13:22:12 +0200
committer  Avi Kivity <avi@redhat.com>   2012-03-01 14:50:07 +0200
commit     8f6f962b994e1402935055ac7093ac977ccc9a5c (patch)
tree       fdbfb7dafd54db06386582f9a95e0b9cdb315eea
parent     7c51c1aa03a52b9fd75ed1ade2e65d079ae4d50e (diff)
kvm: fix unaligned slots
kvm_set_phys_mem() may be passed sections that are not aligned to a page
boundary.  The current code simply brute-forces the alignment, which leads
to an inconsistency and an abort().

Fix by aligning the start and the end of the section correctly, discarding
any unaligned head or tail.

This was triggered by a guest sizing a 64-bit BAR that is smaller than a
page with PCI_COMMAND_MEMORY enabled and the upper dword clear.

Signed-off-by: Avi Kivity <avi@redhat.com>
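As a standalone illustration of the trimming arithmetic described above (not part of
the patch itself), the sketch below mimics the head/tail discard with a hard-coded
4 KiB page.  page_align(), PAGE_SIZE and PAGE_MASK are local stand-ins for QEMU's
TARGET_PAGE_ALIGN(), TARGET_PAGE_SIZE and TARGET_PAGE_MASK, and the addresses in
main() are made up for the example.

    /* Standalone sketch of the trimming logic (not QEMU code); assumes a
     * 4 KiB page size. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static uint64_t page_align(uint64_t addr)
    {
        return (addr + PAGE_SIZE - 1) & PAGE_MASK;
    }

    /* Trim [start, start + size) down to whole pages; returns false when no
     * complete page is left, mirroring the early returns in the patch. */
    static bool trim_to_pages(uint64_t *start, uint64_t *size)
    {
        uint64_t delta = page_align(*start) - *start;   /* unaligned head */

        if (delta > *size) {
            return false;               /* section ends before the next page */
        }
        *start += delta;
        *size -= delta;
        *size &= PAGE_MASK;             /* drop the unaligned tail */
        return *size != 0;
    }

    int main(void)
    {
        /* The trigger described above: a sub-page, unaligned 64-bit BAR. */
        uint64_t start = 0xfe000800ULL, size = 0x100ULL;

        if (!trim_to_pages(&start, &size)) {
            printf("no page-sized remainder, slot is skipped\n");
        }
        return 0;
    }

With these inputs delta is 0x800, which is larger than the 0x100-byte section, so
the whole section is discarded instead of being force-aligned into a bogus slot.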
-rw-r--r--  kvm-all.c  15
1 file changed, 12 insertions, 3 deletions
diff --git a/kvm-all.c b/kvm-all.c
index c4babdac0d..4b7a4ae5dd 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -541,17 +541,26 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
     target_phys_addr_t start_addr = section->offset_within_address_space;
     ram_addr_t size = section->size;
     void *ram = NULL;
+    unsigned delta;
 
     /* kvm works in page size chunks, but the function may be called
        with sub-page size and unaligned start address. */
-    size = TARGET_PAGE_ALIGN(size);
-    start_addr = TARGET_PAGE_ALIGN(start_addr);
+    delta = TARGET_PAGE_ALIGN(start_addr) - start_addr;
+    if (delta > size) {
+        return;
+    }
+    start_addr += delta;
+    size -= delta;
+    size &= TARGET_PAGE_MASK;
+    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
+        return;
+    }
 
     if (!memory_region_is_ram(mr)) {
         return;
     }
 
-    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region;
+    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
 
     while (1) {
         mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);