| author | Jun Nakajima <jun.nakajima@intel.com> | 2010-08-31 16:41:25 +0100 |
|---|---|---|
| committer | Alexander Graf <agraf@suse.de> | 2011-05-08 10:10:01 +0200 |
| commit | 432d268c0552fd30c8be564f7ea2504a2b546101 | |
| tree | d74d75e9b70fd2ced0daf705062260dab19b40c4 /xen-all.c | |
| parent | 9c11a8ac886ccbb5f8e1b08e8ae12f045d783031 | |
xen: Introduce the Xen mapcache
On an IA32 host or an IA32 PAE host it is, at present, generally impossible to
create an HVM guest with more than 2G of memory, because Qemu can almost never
find a large enough, contiguous virtual address range to map the guest's whole
physical address space. This patch fixes the issue by using dynamic mapping
based on small blocks of memory.
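As a rough illustration of that approach (this is a sketch, not the mapcache
code itself; the bucket size and the helper name are assumptions), the cache
maps one small, fixed-size bucket of guest frames at a time with
xc_map_foreign_bulk and can recycle the host virtual range when the entry is
evicted:

    /* Illustrative sketch only -- bucket size and helper name are made up.
     * The point: map a small window of guest frames on demand instead of
     * the whole guest, so a 32-bit Qemu never needs gigabytes of
     * contiguous virtual address space. */
    #include <sys/mman.h>
    #include <xenctrl.h>

    #define BUCKET_SHIFT 16                     /* assumed 64 KiB bucket */
    #define BUCKET_PAGES (1u << (BUCKET_SHIFT - XC_PAGE_SHIFT))

    static void *map_one_bucket(xc_interface *xch, uint32_t dom,
                                uint64_t guest_paddr)
    {
        xen_pfn_t pfns[BUCKET_PAGES];
        int err[BUCKET_PAGES];
        unsigned int i;

        for (i = 0; i < BUCKET_PAGES; i++) {
            pfns[i] = (guest_paddr >> XC_PAGE_SHIFT) + i;
        }
        /* Only this bucket is mapped; munmap() it again when the cache
         * entry is recycled. */
        return xc_map_foreign_bulk(xch, dom, PROT_READ | PROT_WRITE,
                                   pfns, err, BUCKET_PAGES);
    }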
Each call to qemu_get_ram_ptr goes through qemu_map_cache with the lock option
set, so the mapcache will not unmap the pointers it returns while they are
still in use.
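A minimal sketch of what such a call site looks like, assuming the
qemu_map_cache(phys_addr, size, lock) signature introduced by this series
(treating size 0 as "one default-sized bucket" is an assumption here):

    /* Sketch, not the exec.c code: a non-zero lock argument bumps the
     * entry's lock count, so the mapcache keeps this bucket mapped until
     * it is explicitly invalidated. */
    #include "xen-mapcache.h"

    static uint8_t *pin_guest_ram(target_phys_addr_t addr)
    {
        /* size == 0: default bucket (assumption); lock == 1: keep mapped */
        return qemu_map_cache(addr, 0, 1);
    }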
Blocks that do not belong to guest RAM, but typically to a device ROM or to a
framebuffer, are handled by a separate function, so the whole RAMBlock can be
mapped in one piece.
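A sketch of that separate path, assuming a helper along the lines of
xen_map_block(offset, length) from the mapcache added by this series (both the
guard and the helper name are approximations):

    /* Sketch: ROM and framebuffer RAMBlocks are small, so instead of going
     * through the per-bucket cache they can be mapped in one piece and the
     * pointer kept in block->host. */
    static void *get_non_ram_ptr(RAMBlock *block, ram_addr_t addr)
    {
        if (block->host == NULL) {
            block->host = xen_map_block(block->offset, block->length);
        }
        return block->host + (addr - block->offset);
    }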
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'xen-all.c')
-rw-r--r-- | xen-all.c | 66 |
1 file changed, 66 insertions, 0 deletions
diff --git a/xen-all.c b/xen-all.c
--- a/xen-all.c
+++ b/xen-all.c
@@ -10,6 +10,9 @@
 #include "hw/xen_common.h"
 #include "hw/xen_backend.h"
 
+#include "xen-mapcache.h"
+#include "trace.h"
+
 /* Xen specific function for piix pci */
 
 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
@@ -52,6 +55,65 @@ qemu_irq *xen_interrupt_controller_init(void)
     return qemu_allocate_irqs(xen_set_irq, NULL, 16);
 }
 
+/* Memory Ops */
+
+static void xen_ram_init(ram_addr_t ram_size)
+{
+    RAMBlock *new_block;
+    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
+
+    new_block = qemu_mallocz(sizeof (*new_block));
+    pstrcpy(new_block->idstr, sizeof (new_block->idstr), "xen.ram");
+    new_block->host = NULL;
+    new_block->offset = 0;
+    new_block->length = ram_size;
+
+    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+
+    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
+                                       new_block->length >> TARGET_PAGE_BITS);
+    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
+           0xff, new_block->length >> TARGET_PAGE_BITS);
+
+    if (ram_size >= 0xe0000000 ) {
+        above_4g_mem_size = ram_size - 0xe0000000;
+        below_4g_mem_size = 0xe0000000;
+    } else {
+        below_4g_mem_size = ram_size;
+    }
+
+    cpu_register_physical_memory(0, below_4g_mem_size, new_block->offset);
+#if TARGET_PHYS_ADDR_BITS > 32
+    if (above_4g_mem_size > 0) {
+        cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size,
+                                     new_block->offset + below_4g_mem_size);
+    }
+#endif
+}
+
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
+{
+    unsigned long nr_pfn;
+    xen_pfn_t *pfn_list;
+    int i;
+
+    trace_xen_ram_alloc(ram_addr, size);
+
+    nr_pfn = size >> TARGET_PAGE_BITS;
+    pfn_list = qemu_malloc(sizeof (*pfn_list) * nr_pfn);
+
+    for (i = 0; i < nr_pfn; i++) {
+        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
+    }
+
+    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
+        hw_error("xen: failed to populate ram at %lx", ram_addr);
+    }
+
+    qemu_free(pfn_list);
+}
+
+
 /* VCPU Operations, MMIO, IO ring ... */
 
 static void xen_reset_vcpu(void *opaque)
@@ -86,5 +148,9 @@ int xen_init(void)
 
 int xen_hvm_init(void)
 {
+    /* Init RAM management */
+    qemu_map_cache_init();
+    xen_ram_init(ram_size);
+
     return 0;
 }