From b4dd7802cac7749de80ceadd622d4209fc31b507 Mon Sep 17 00:00:00 2001
From: Anthony PERARD
Date: Tue, 24 May 2011 14:34:21 +0100
Subject: xen: Introduce VGA sync dirty bitmap support

This patch introduces a phys memory client for Xen. Only sync
dirty_bitmap and set_memory are actually implemented. migration_log
will stay empty for the moment.

Xen can only log one range for bit change, so only the range in the
first call will be synced.

Signed-off-by: Anthony PERARD
Signed-off-by: Alexander Graf
---
 xen-all.c | 267 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 267 insertions(+)

(limited to 'xen-all.c')

diff --git a/xen-all.c b/xen-all.c
index 0eac202d4e..75a82c28fb 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -13,6 +13,7 @@
 #include "hw/xen_common.h"
 #include "hw/xen_backend.h"
 
+#include "range.h"
 #include "xen-mapcache.h"
 #include "trace.h"
 
@@ -54,6 +55,14 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
 
 #define BUFFER_IO_MAX_DELAY  100
 
+typedef struct XenPhysmap {
+    target_phys_addr_t start_addr;
+    ram_addr_t size;
+    target_phys_addr_t phys_offset;
+
+    QLIST_ENTRY(XenPhysmap) list;
+} XenPhysmap;
+
 typedef struct XenIOState {
     shared_iopage_t *shared_page;
     buffered_iopage_t *buffered_io_page;
@@ -66,6 +75,9 @@ typedef struct XenIOState {
     int send_vcpu;
 
     struct xs_handle *xenstore;
+    CPUPhysMemoryClient client;
+    QLIST_HEAD(, XenPhysmap) physmap;
+    const XenPhysmap *log_for_dirtybit;
 
     Notifier exit;
 } XenIOState;
@@ -178,6 +190,256 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
     qemu_free(pfn_list);
 }
 
+static XenPhysmap *get_physmapping(XenIOState *state,
+                                   target_phys_addr_t start_addr, ram_addr_t size)
+{
+    XenPhysmap *physmap = NULL;
+
+    start_addr &= TARGET_PAGE_MASK;
+
+    QLIST_FOREACH(physmap, &state->physmap, list) {
+        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
+            return physmap;
+        }
+    }
+    return NULL;
+}
+
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
+static int xen_add_to_physmap(XenIOState *state,
+                              target_phys_addr_t start_addr,
+                              ram_addr_t size,
+                              target_phys_addr_t phys_offset)
+{
+    unsigned long i = 0;
+    int rc = 0;
+    XenPhysmap *physmap = NULL;
+    target_phys_addr_t pfn, start_gpfn;
+
+    if (get_physmapping(state, start_addr, size)) {
+        return 0;
+    }
+    if (size <= 0) {
+        return -1;
+    }
+
+    DPRINTF("mapping vram to %llx - %llx, from %llx\n",
+            start_addr, start_addr + size, phys_offset);
+
+    pfn = phys_offset >> TARGET_PAGE_BITS;
+    start_gpfn = start_addr >> TARGET_PAGE_BITS;
+    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
+        unsigned long idx = pfn + i;
+        xen_pfn_t gpfn = start_gpfn + i;
+
+        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
+        if (rc) {
+            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
+                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
+            return -rc;
+        }
+    }
+
+    physmap = qemu_malloc(sizeof (XenPhysmap));
+
+    physmap->start_addr = start_addr;
+    physmap->size = size;
+    physmap->phys_offset = phys_offset;
+
+    QLIST_INSERT_HEAD(&state->physmap, physmap, list);
+
+    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
+                                   start_addr >> TARGET_PAGE_BITS,
+                                   (start_addr + size) >> TARGET_PAGE_BITS,
+                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
+    return 0;
+}
+
+static int xen_remove_from_physmap(XenIOState *state,
+                                   target_phys_addr_t start_addr,
+                                   ram_addr_t size)
+{
+    unsigned long i = 0;
+    int rc = 0;
+    XenPhysmap *physmap = NULL;
+    target_phys_addr_t phys_offset = 0;
+
+    physmap = get_physmapping(state, start_addr, size);
+    if (physmap == NULL) {
+        return -1;
+    }
+
+    phys_offset = physmap->phys_offset;
+    size = physmap->size;
+
+    DPRINTF("unmapping vram to %llx - %llx, from %llx\n",
+            phys_offset, phys_offset + size, start_addr);
+
+    size >>= TARGET_PAGE_BITS;
+    start_addr >>= TARGET_PAGE_BITS;
+    phys_offset >>= TARGET_PAGE_BITS;
+    for (i = 0; i < size; i++) {
+        unsigned long idx = start_addr + i;
+        xen_pfn_t gpfn = phys_offset + i;
+
+        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
+        if (rc) {
+            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
+                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
+            return -rc;
+        }
+    }
+
+    QLIST_REMOVE(physmap, list);
+    if (state->log_for_dirtybit == physmap) {
+        state->log_for_dirtybit = NULL;
+    }
+    free(physmap);
+
+    return 0;
+}
+
+#else
+static int xen_add_to_physmap(XenIOState *state,
+                              target_phys_addr_t start_addr,
+                              ram_addr_t size,
+                              target_phys_addr_t phys_offset)
+{
+    return -ENOSYS;
+}
+
+static int xen_remove_from_physmap(XenIOState *state,
+                                   target_phys_addr_t start_addr,
+                                   ram_addr_t size)
+{
+    return -ENOSYS;
+}
+#endif
+
+static void xen_client_set_memory(struct CPUPhysMemoryClient *client,
+                                  target_phys_addr_t start_addr,
+                                  ram_addr_t size,
+                                  ram_addr_t phys_offset,
+                                  bool log_dirty)
+{
+    XenIOState *state = container_of(client, XenIOState, client);
+    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
+    hvmmem_type_t mem_type;
+
+    if (!(start_addr != phys_offset
+          && ( (log_dirty && flags < IO_MEM_UNASSIGNED)
+               || (!log_dirty && flags == IO_MEM_UNASSIGNED)))) {
+        return;
+    }
+
+    trace_xen_client_set_memory(start_addr, size, phys_offset, log_dirty);
+
+    start_addr &= TARGET_PAGE_MASK;
+    size = TARGET_PAGE_ALIGN(size);
+    phys_offset &= TARGET_PAGE_MASK;
+
+    switch (flags) {
+    case IO_MEM_RAM:
+        xen_add_to_physmap(state, start_addr, size, phys_offset);
+        break;
+    case IO_MEM_ROM:
+        mem_type = HVMMEM_ram_ro;
+        if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
+                                start_addr >> TARGET_PAGE_BITS,
+                                size >> TARGET_PAGE_BITS)) {
+            DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
+                    start_addr);
+        }
+        break;
+    case IO_MEM_UNASSIGNED:
+        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
+            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
+        }
+        break;
+    }
+}
+
+static int xen_sync_dirty_bitmap(XenIOState *state,
+                                 target_phys_addr_t start_addr,
+                                 ram_addr_t size)
+{
+    target_phys_addr_t npages = size >> TARGET_PAGE_BITS;
+    target_phys_addr_t vram_offset = 0;
+    const int width = sizeof(unsigned long) * 8;
+    unsigned long bitmap[(npages + width - 1) / width];
+    int rc, i, j;
+    const XenPhysmap *physmap = NULL;
+
+    physmap = get_physmapping(state, start_addr, size);
+    if (physmap == NULL) {
+        /* not handled */
+        return -1;
+    }
+
+    if (state->log_for_dirtybit == NULL) {
+        state->log_for_dirtybit = physmap;
+    } else if (state->log_for_dirtybit != physmap) {
+        return -1;
+    }
+    vram_offset = physmap->phys_offset;
+
+    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
+                                 start_addr >> TARGET_PAGE_BITS, npages,
+                                 bitmap);
+    if (rc) {
+        return rc;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
+        unsigned long map = bitmap[i];
+        while (map != 0) {
+            j = ffsl(map) - 1;
+            map &= ~(1ul << j);
+            cpu_physical_memory_set_dirty(vram_offset + (i * width + j) * TARGET_PAGE_SIZE);
+        };
+    }
+
+    return 0;
+}
+
+static int xen_log_start(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size)
+{
+    XenIOState *state = container_of(client, XenIOState, client);
+
+    return xen_sync_dirty_bitmap(state, phys_addr, size);
+}
+
+static int xen_log_stop(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size)
+{
+    XenIOState *state = container_of(client, XenIOState, client);
+
+    state->log_for_dirtybit = NULL;
+    /* Disable dirty bit tracking */
+    return xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
+}
+
+static int xen_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
+                                        target_phys_addr_t start_addr,
+                                        target_phys_addr_t end_addr)
+{
+    XenIOState *state = container_of(client, XenIOState, client);
+
+    return xen_sync_dirty_bitmap(state, start_addr, end_addr - start_addr);
+}
+
+static int xen_client_migration_log(struct CPUPhysMemoryClient *client,
+                                    int enable)
+{
+    return 0;
+}
+
+static CPUPhysMemoryClient xen_cpu_phys_memory_client = {
+    .set_memory = xen_client_set_memory,
+    .sync_dirty_bitmap = xen_client_sync_dirty_bitmap,
+    .migration_log = xen_client_migration_log,
+    .log_start = xen_log_start,
+    .log_stop = xen_log_stop,
+};
 
 /* VCPU Operations, MMIO, IO ring ... */
 
@@ -581,6 +843,11 @@ int xen_hvm_init(void)
 
     qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);
 
+    state->client = xen_cpu_phys_memory_client;
+    QLIST_INIT(&state->physmap);
+    cpu_register_phys_memory_client(&state->client);
+    state->log_for_dirtybit = NULL;
+
     return 0;
 }
 
-- 
cgit v1.2.3


From ebed85058b6e89a5202112e9aa2abab3aa3804c3 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Wed, 15 Jun 2011 17:29:27 +0100
Subject: xen: only track the linear framebuffer

Xen can only do dirty bit tracking for one memory region, so we should
explicitly avoid trying to track anything but the vga vram region.

Signed-off-by: Stefano Stabellini
Signed-off-by: Alexander Graf
---
 xen-all.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'xen-all.c')

diff --git a/xen-all.c b/xen-all.c
index 75a82c28fb..fe75ddd833 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -215,6 +215,7 @@ static int xen_add_to_physmap(XenIOState *state,
     int rc = 0;
     XenPhysmap *physmap = NULL;
     target_phys_addr_t pfn, start_gpfn;
+    RAMBlock *block;
 
     if (get_physmapping(state, start_addr, size)) {
         return 0;
@@ -223,6 +224,19 @@ static int xen_add_to_physmap(XenIOState *state,
         return -1;
     }
 
+    /* Xen can only handle a single dirty log region for now and we want
+     * the linear framebuffer to be that region.
+     * Avoid tracking any regions that is not videoram and avoid tracking
+     * the legacy vga region. */
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        if (!strcmp(block->idstr, "vga.vram") && block->offset == phys_offset
+                && start_addr > 0xbffff) {
+            goto go_physmap;
+        }
+    }
+    return -1;
+
+go_physmap:
     DPRINTF("mapping vram to %llx - %llx, from %llx\n",
             start_addr, start_addr + size, phys_offset);
 
-- 
cgit v1.2.3
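The heart of xen_sync_dirty_bitmap() in the first patch is the word-by-word walk
over the bitmap that xc_hvm_track_dirty_vram() fills in: each set bit is cleared
in turn and the matching guest page is marked dirty. The standalone sketch below
shows that idiom in isolation. It is not the QEMU code: mark_page_dirty() is a
hypothetical stand-in for cpu_physical_memory_set_dirty(), and the GCC builtin
__builtin_ctzl() is used in place of ffsl(map) - 1 so the sketch compiles with no
extra headers.

/*
 * Illustrative sketch only: walk a dirty-VRAM bitmap the way
 * xen_sync_dirty_bitmap() does.  mark_page_dirty() is a placeholder,
 * not a real QEMU API.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static void mark_page_dirty(unsigned long addr)
{
    /* In xen-all.c this would be cpu_physical_memory_set_dirty(addr). */
    printf("dirty page at offset 0x%lx\n", addr);
}

static void scan_dirty_bitmap(const unsigned long *bitmap,
                              unsigned long npages,
                              unsigned long vram_offset)
{
    const unsigned int width = sizeof(unsigned long) * 8;
    unsigned long nwords = (npages + width - 1) / width;
    unsigned long i;

    for (i = 0; i < nwords; i++) {
        unsigned long map = bitmap[i];

        /* Clear one set bit per iteration, lowest bit first. */
        while (map != 0) {
            int j = __builtin_ctzl(map);       /* same as ffsl(map) - 1 */
            map &= ~(1UL << j);
            mark_page_dirty(vram_offset + (i * width + j) * PAGE_SIZE);
        }
    }
}

int main(void)
{
    unsigned long bitmap[2];

    memset(bitmap, 0, sizeof(bitmap));
    bitmap[0] = (1UL << 0) | (1UL << 5);   /* pages 0 and 5 of the region */
    bitmap[1] = 1UL << 3;                  /* page "width + 3" of the region */

    scan_dirty_bitmap(bitmap, 2 * sizeof(unsigned long) * 8, 0x100000);
    return 0;
}

Clearing the lowest set bit on every iteration keeps the inner loop proportional
to the number of dirty pages rather than to the size of the tracked region, which
matters when only a few lines of the framebuffer change between refreshes.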