author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>  2011-05-19 18:35:42 +0100
committer  Alexander Graf <agraf@suse.de>                         2011-06-19 04:40:04 +0200
commit     c13390cd384a9564e6dded127d01ef0627b6b1c5 (patch)
tree       b9d856d3219a4f0039b5b059f0214b4aac225fa2
parent     b4dd7802cac7749de80ceadd622d4209fc31b507 (diff)
xen: fix qemu_map_cache with size != MCACHE_BUCKET_SIZE
Fix the implementation of qemu_map_cache: correctly support size
arguments different from 0 or MCACHE_BUCKET_SIZE.
The new implementation supports locked mapcache entries whose size is a
multiple of MCACHE_BUCKET_SIZE. qemu_invalidate_entry can correctly
find and unmap these "large" mapcache entries, given that the virtual
address passed to qemu_invalidate_entry is the same one returned by
qemu_map_cache when the locked mapcache entry was created.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
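The size rounding at the heart of this fix is compact enough to show in isolation. Below is a minimal standalone sketch of it, not the patch itself: round_to_buckets is a hypothetical name, uint64_t stands in for target_phys_addr_t, and MCACHE_BUCKET_SHIFT is assumed to be 20 here (xen-mapcache.c picks the value per host architecture).

#include <stdio.h>
#include <stdint.h>

#define MCACHE_BUCKET_SHIFT 20  /* assumed for this sketch; per-arch in QEMU */
#define MCACHE_BUCKET_SIZE  (1UL << MCACHE_BUCKET_SHIFT)

/* Mirror of the rounding qemu_map_cache() now applies to its size argument. */
static uint64_t round_to_buckets(uint64_t address_offset, uint64_t size)
{
    uint64_t __size = size;

    /* If the tail of the request spills into the next bucket, add one bucket. */
    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE;
    /* Round up to a whole multiple of MCACHE_BUCKET_SIZE. */
    if (__size % MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
    /* A zero-sized request still maps one full bucket. */
    if (!__size)
        __size = MCACHE_BUCKET_SIZE;
    return __size;
}

int main(void)
{
    /* 0 bytes -> 1 bucket; 8 bytes straddling a bucket boundary -> 2 buckets. */
    printf("%llu\n", (unsigned long long)round_to_buckets(0, 0));
    printf("%llu\n", (unsigned long long)round_to_buckets(MCACHE_BUCKET_SIZE - 4, 8));
    return 0;
}

With this rounding in place a locked mapping always covers whole buckets, which is what lets entry->size drive the later munmap calls.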
-rw-r--r--  xen-mapcache.c  77
1 file changed, 65 insertions(+), 12 deletions(-)
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 349cc6221d..90fbd4983f 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -43,14 +43,16 @@
 typedef struct MapCacheEntry {
     target_phys_addr_t paddr_index;
     uint8_t *vaddr_base;
-    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
+    unsigned long *valid_mapping;
     uint8_t lock;
+    target_phys_addr_t size;
     struct MapCacheEntry *next;
 } MapCacheEntry;

 typedef struct MapCacheRev {
     uint8_t *vaddr_req;
     target_phys_addr_t paddr_index;
+    target_phys_addr_t size;
     QTAILQ_ENTRY(MapCacheRev) next;
 } MapCacheRev;

@@ -68,6 +70,15 @@ typedef struct MapCache {

 static MapCache *mapcache;

+static inline int test_bits(int nr, int size, const unsigned long *addr)
+{
+    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
+    if (res >= nr + size)
+        return 1;
+    else
+        return 0;
+}
+
 void qemu_map_cache_init(void)
 {
     unsigned long size;
@@ -115,11 +126,15 @@ static void qemu_remap_bucket(MapCacheEntry *entry,
     err = qemu_mallocz(nb_pfn * sizeof (int));

     if (entry->vaddr_base != NULL) {
-        if (munmap(entry->vaddr_base, size) != 0) {
+        if (munmap(entry->vaddr_base, entry->size) != 0) {
             perror("unmap fails");
             exit(-1);
         }
     }
+    if (entry->valid_mapping != NULL) {
+        qemu_free(entry->valid_mapping);
+        entry->valid_mapping = NULL;
+    }

     for (i = 0; i < nb_pfn; i++) {
         pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
@@ -134,6 +149,9 @@ static void qemu_remap_bucket(MapCacheEntry *entry,

     entry->vaddr_base = vaddr_base;
     entry->paddr_index = address_index;
+    entry->size = size;
+    entry->valid_mapping = (unsigned long *) qemu_mallocz(sizeof(unsigned long) *
+            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

     bitmap_zero(entry->valid_mapping, nb_pfn);
     for (i = 0; i < nb_pfn; i++) {
@@ -151,32 +169,47 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
     MapCacheEntry *entry, *pentry = NULL;
     target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
     target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
+    target_phys_addr_t __size = size;

     trace_qemu_map_cache(phys_addr);

-    if (address_index == mapcache->last_address_index && !lock) {
+    if (address_index == mapcache->last_address_index && !lock && !__size) {
         trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
         return mapcache->last_address_vaddr + address_offset;
     }

+    /* size is always a multiple of MCACHE_BUCKET_SIZE */
+    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE)
+        __size += MCACHE_BUCKET_SIZE;
+    if (__size % MCACHE_BUCKET_SIZE)
+        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
+    if (!__size)
+        __size = MCACHE_BUCKET_SIZE;
+
     entry = &mapcache->entry[address_index % mapcache->nr_buckets];

-    while (entry && entry->lock && entry->paddr_index != address_index && entry->vaddr_base) {
+    while (entry && entry->lock && entry->vaddr_base &&
+            (entry->paddr_index != address_index || entry->size != __size ||
+             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+                 entry->valid_mapping))) {
         pentry = entry;
         entry = entry->next;
     }
     if (!entry) {
         entry = qemu_mallocz(sizeof (MapCacheEntry));
         pentry->next = entry;
-        qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
+        qemu_remap_bucket(entry, __size, address_index);
     } else if (!entry->lock) {
         if (!entry->vaddr_base || entry->paddr_index != address_index ||
-                !test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
-            qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
+                entry->size != __size ||
+                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+                    entry->valid_mapping)) {
+            qemu_remap_bucket(entry, __size, address_index);
         }
     }

-    if (!test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
+    if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+                entry->valid_mapping)) {
         mapcache->last_address_index = -1;
         trace_qemu_map_cache_return(NULL);
         return NULL;
@@ -189,6 +222,7 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
         entry->lock++;
         reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
         reventry->paddr_index = mapcache->last_address_index;
+        reventry->size = entry->size;
         QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
     }

@@ -231,13 +265,16 @@ void qemu_map_cache_unlock(void *buffer)

 ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
 {
+    MapCacheEntry *entry = NULL, *pentry = NULL;
     MapCacheRev *reventry;
     target_phys_addr_t paddr_index;
+    target_phys_addr_t size;
     int found = 0;

     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
         if (reventry->vaddr_req == ptr) {
             paddr_index = reventry->paddr_index;
+            size = reventry->size;
             found = 1;
             break;
         }
@@ -252,7 +289,17 @@ ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
         return 0;
     }

-    return paddr_index << MCACHE_BUCKET_SHIFT;
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
+        return 0;
+    }
+    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
+        ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
 }

 void qemu_invalidate_entry(uint8_t *buffer)
@@ -260,6 +307,7 @@
     MapCacheEntry *entry = NULL, *pentry = NULL;
     MapCacheRev *reventry;
     target_phys_addr_t paddr_index;
+    target_phys_addr_t size;
     int found = 0;

     if (mapcache->last_address_vaddr == buffer) {
@@ -269,6 +317,7 @@
     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
         if (reventry->vaddr_req == buffer) {
             paddr_index = reventry->paddr_index;
+            size = reventry->size;
             found = 1;
             break;
         }
@@ -284,7 +333,7 @@
     qemu_free(reventry);

     entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
-    while (entry && entry->paddr_index != paddr_index) {
+    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
         pentry = entry;
         entry = entry->next;
     }
@@ -298,10 +347,11 @@
     }

     pentry->next = entry->next;
-    if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+    if (munmap(entry->vaddr_base, entry->size) != 0) {
         perror("unmap fails");
         exit(-1);
     }
+    qemu_free(entry->valid_mapping);
     qemu_free(entry);
 }

@@ -328,13 +378,16 @@ void qemu_invalidate_map_cache(void)
             continue;
         }

-        if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+        if (munmap(entry->vaddr_base, entry->size) != 0) {
             perror("unmap fails");
             exit(-1);
         }

         entry->paddr_index = 0;
         entry->vaddr_base = NULL;
+        entry->size = 0;
+        qemu_free(entry->valid_mapping);
+        entry->valid_mapping = NULL;
     }

     mapcache->last_address_index = -1;
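The new test_bits helper is what allows lookups to check that every page backing a multi-page request is validly mapped, rather than only the first page as the old test_bit call did. A standalone sketch of its behavior follows; the find_next_zero_bit here is a naive stand-in for QEMU's bitmap helper of the same name, written out only so the example runs on its own.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Naive stand-in for QEMU's find_next_zero_bit(): returns the index of the
 * first clear bit at or after `offset`, or `size` if all bits are set. */
static unsigned long find_next_zero_bit(const unsigned long *addr,
                                        unsigned long size, unsigned long offset)
{
    unsigned long i;
    for (i = offset; i < size; i++) {
        if (!(addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
            return i;
    }
    return size;
}

/* Same logic as the patch: 1 iff all `size` bits starting at `nr` are set. */
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= (unsigned long)(nr + size))
        return 1;
    else
        return 0;
}

int main(void)
{
    unsigned long valid_mapping = 0x0f;              /* pages 0-3 valid */
    printf("%d\n", test_bits(0, 4, &valid_mapping)); /* 1: pages 0-3 all valid */
    printf("%d\n", test_bits(2, 4, &valid_mapping)); /* 0: page 4 is not */
    return 0;
}

qemu_map_cache uses this check both while walking the bucket chain and before returning, so a partially valid mapping is remapped or rejected instead of being handed to the caller.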