author     Anthony Liguori <aliguori@us.ibm.com>  2011-06-22 07:07:55 -0500
committer  Anthony Liguori <aliguori@us.ibm.com>  2011-06-22 07:07:55 -0500
commit     7ee28fd303003d70bb4c142e6ad4b92b7383b5b4 (patch)
tree       03781532cc1e7b53d5ce4ffd3a49d515dcdf21c3 /exec.c
parent     bcd2491a48787542160d75fb7468907971b04478 (diff)
parent     01195b7347e341a49fa554959882b26c666a3616 (diff)
Merge remote-tracking branch 'agraf/xen-next' into staging
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  88
1 file changed, 43 insertions(+), 45 deletions(-)
diff --git a/exec.c b/exec.c
index 09928a3b37..7f14332c87 100644
--- a/exec.c
+++ b/exec.c
@@ -53,6 +53,7 @@
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
+#include "trace.h"
#endif
//#define DEBUG_TB_INVALIDATE
@@ -3084,11 +3085,12 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
if (xen_mapcache_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
+ * In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 1);
+ return qemu_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = xen_map_block(block->offset, block->length);
+ block->host = qemu_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
@@ -3113,11 +3115,12 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
if (xen_mapcache_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
+ * In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 1);
+ return qemu_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = xen_map_block(block->offset, block->length);
+ block->host = qemu_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
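Both hunks above resolve a guest ram address to a host pointer with the same RAMBlock arithmetic. A minimal sketch of that lookup, using only the offset/length/host fields visible in this diff (the helper name is illustrative, not part of the patch):

static void *ram_block_host_ptr(RAMBlock *block, ram_addr_t addr)
{
    /* addr lies inside [block->offset, block->offset + block->length) */
    if (addr - block->offset < block->length) {
        return block->host + (addr - block->offset);
    }
    return NULL; /* addr is not backed by this block */
}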
@@ -3130,32 +3133,46 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
return NULL;
}
-void qemu_put_ram_ptr(void *addr)
+/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
+ * but takes a size argument */
+void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
{
- trace_qemu_put_ram_ptr(addr);
-
- if (xen_mapcache_enabled()) {
+ if (xen_mapcache_enabled())
+ return qemu_map_cache(addr, *size, 1);
+ else {
RAMBlock *block;
QLIST_FOREACH(block, &ram_list.blocks, next) {
- if (addr == block->host) {
- break;
+ if (addr - block->offset < block->length) {
+ if (addr - block->offset + *size > block->length)
+ *size = block->length - addr + block->offset;
+ return block->host + (addr - block->offset);
}
}
- if (block && block->host) {
- xen_unmap_block(block->host, block->length);
- block->host = NULL;
- } else {
- qemu_map_cache_unlock(addr);
- }
+
+ fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+ abort();
+
+ *size = 0;
+ return NULL;
}
}
+void qemu_put_ram_ptr(void *addr)
+{
+ trace_qemu_put_ram_ptr(addr);
+}
+
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
RAMBlock *block;
uint8_t *host = ptr;
+ if (xen_mapcache_enabled()) {
+ *ram_addr = qemu_ram_addr_from_mapcache(ptr);
+ return 0;
+ }
+
QLIST_FOREACH(block, &ram_list.blocks, next) {
/* This case append when the block is not mapped. */
if (block->host == NULL) {
@@ -3167,11 +3184,6 @@ int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
}
}
- if (xen_mapcache_enabled()) {
- *ram_addr = qemu_ram_addr_from_mapcache(ptr);
- return 0;
- }
-
return -1;
}
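The qemu_ram_ptr_length() introduced above uses *size as both input (bytes the caller wants) and output (bytes the returned pointer actually covers: clamped to the containing RAMBlock on the non-Xen path, or to whatever the Xen mapcache mapped). A hedged caller sketch, with purely illustrative variable names:

    target_phys_addr_t got = want;                  /* request 'want' bytes */
    uint8_t *host = qemu_ram_ptr_length(addr, &got);
    if (got < want) {
        /* only 'got' bytes are contiguous here; loop over the rest or
         * fall back to cpu_physical_memory_rw() */
    }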
@@ -4003,14 +4015,12 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
int is_write)
{
target_phys_addr_t len = *plen;
- target_phys_addr_t done = 0;
+ target_phys_addr_t todo = 0;
int l;
- uint8_t *ret = NULL;
- uint8_t *ptr;
target_phys_addr_t page;
unsigned long pd;
PhysPageDesc *p;
- unsigned long addr1;
+ target_phys_addr_t addr1 = addr;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
@@ -4025,7 +4035,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
}
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- if (done || bounce.buffer) {
+ if (todo || bounce.buffer) {
break;
}
bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
@@ -4034,23 +4044,17 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
if (!is_write) {
cpu_physical_memory_read(addr, bounce.buffer, l);
}
- ptr = bounce.buffer;
- } else {
- addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
- ptr = qemu_get_ram_ptr(addr1);
- }
- if (!done) {
- ret = ptr;
- } else if (ret + done != ptr) {
- break;
+
+ *plen = l;
+ return bounce.buffer;
}
len -= l;
addr += l;
- done += l;
+ todo += l;
}
- *plen = done;
- return ret;
+ *plen = todo;
+ return qemu_ram_ptr_length(addr1, plen);
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
@@ -4080,13 +4084,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
}
}
if (xen_mapcache_enabled()) {
- uint8_t *buffer1 = buffer;
- uint8_t *end_buffer = buffer + len;
-
- while (buffer1 < end_buffer) {
- qemu_put_ram_ptr(buffer1);
- buffer1 += TARGET_PAGE_SIZE;
- }
+ qemu_invalidate_entry(buffer);
}
return;
}
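With these changes, cpu_physical_memory_map() returns either the single bounce page (for non-RAM regions) or a direct pointer from qemu_ram_ptr_length(), and the Xen branch of cpu_physical_memory_unmap() invalidates the mapcache entry in one call instead of walking the buffer page by page. A hedged usage sketch of the pair, error handling omitted and the access-length argument assumed equal to the mapped length:

    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, is_write);
    if (buf) {
        /* use at most plen bytes; it may be less than the len requested */
        cpu_physical_memory_unmap(buf, plen, is_write, plen);
    }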