author    | Hanweidong <hanweidong@huawei.com> | 2013-04-02 13:22:41 +0000
committer | Stefano Stabellini <stefano.stabellini@eu.citrix.com> | 2013-04-03 11:51:53 +0000
commit    | 044d4e1aae539bd4214175bd9591b3de7986cf18
tree      | ff7de919a19b2b71cccde39389bc881a31129bc7 /xen-mapcache.c
parent    | e2deee3ea6136b6189e8cfd26379420b9a398d96
xen-mapcache: pass the right size argument to test_bits
Compute the correct size for test_bits().

qemu_get_ram_ptr() and qemu_safe_ram_ptr() call xen_map_cache() with a
size of 0 when the requested address is in RAM. xen_map_cache() then
passes that size of 0 on to test_bits() to check whether the
corresponding pfn was mapped in the cache. But test_bits() always
returns 1 when size is 0, without testing any bit. In this case,
test_bits() should actually check one bit. This patch therefore
introduces __test_bit_size, which is always greater than 0 and a
multiple of XC_PAGE_SIZE, so that test_bits() works correctly with
__test_bit_size >> XC_PAGE_SHIFT as its size.
Signed-off-by: Zhenguo Wang <wangzhenguo@huawei.com>
Signed-off-by: Weidong Han <hanweidong@huawei.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
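As an illustration of the rounding described in the commit message (not part of the patch), the sketch below mirrors the __test_bit_size computation in a standalone C program. The helper name, the main() driver, and the 4 KiB values chosen for XC_PAGE_SIZE/XC_PAGE_SHIFT are assumptions made for this example only:

/* Illustrative sketch only, not from the patch: how the number of
 * valid_mapping bits that test_bits() must check is derived.
 * XC_PAGE_SIZE/XC_PAGE_SHIFT are assumed to be 4 KiB pages here. */
#include <stdint.h>
#include <stdio.h>

#define XC_PAGE_SHIFT 12
#define XC_PAGE_SIZE  (1UL << XC_PAGE_SHIFT)

static uint64_t test_bit_size(uint64_t phys_addr, uint64_t size)
{
    uint64_t __test_bit_size;

    if (size) {
        /* Account for the offset into the first page, then round the
         * total up to a multiple of XC_PAGE_SIZE. */
        __test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));
        if (__test_bit_size % XC_PAGE_SIZE) {
            __test_bit_size += XC_PAGE_SIZE - (__test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        /* size == 0 means "one page", so test_bits() checks one bit. */
        __test_bit_size = XC_PAGE_SIZE;
    }
    return __test_bit_size;
}

int main(void)
{
    /* size 0 at 0x1003 -> 1 page; 0x2000 bytes at 0x1003 -> 3 pages. */
    printf("%llu\n", (unsigned long long)(test_bit_size(0x1003, 0) >> XC_PAGE_SHIFT));
    printf("%llu\n", (unsigned long long)(test_bit_size(0x1003, 0x2000) >> XC_PAGE_SHIFT));
    return 0;
}

With these assumed values, a 0x2000-byte request starting at offset 0x3 into a page straddles three pages, so three valid_mapping bits are checked, while a size of 0 checks exactly one bit instead of none.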
Diffstat (limited to 'xen-mapcache.c')
-rw-r--r-- | xen-mapcache.c | 26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/xen-mapcache.c b/xen-mapcache.c
index a80cbdb6a7..5a626cdf84 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -200,6 +200,7 @@ uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
     hwaddr address_index;
     hwaddr address_offset;
     hwaddr __size = size;
+    hwaddr __test_bit_size = size;
     bool translated = false;
 
 tryagain:
@@ -208,9 +209,23 @@ tryagain:
 
     trace_xen_map_cache(phys_addr);
 
+    /* __test_bit_size is always a multiple of XC_PAGE_SIZE */
+    if (size) {
+        __test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));
+
+        if (__test_bit_size % XC_PAGE_SIZE) {
+            __test_bit_size += XC_PAGE_SIZE - (__test_bit_size % XC_PAGE_SIZE);
+        }
+    } else {
+        __test_bit_size = XC_PAGE_SIZE;
+    }
+
     if (mapcache->last_entry != NULL &&
         mapcache->last_entry->paddr_index == address_index &&
-        !lock && !__size) {
+        !lock && !__size &&
+        test_bits(address_offset >> XC_PAGE_SHIFT,
+                  __test_bit_size >> XC_PAGE_SHIFT,
+                  mapcache->last_entry->valid_mapping)) {
         trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
         return mapcache->last_entry->vaddr_base + address_offset;
     }
@@ -229,7 +244,8 @@ tryagain:
     while (entry && entry->lock && entry->vaddr_base &&
             (entry->paddr_index != address_index || entry->size != __size ||
-             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+             !test_bits(address_offset >> XC_PAGE_SHIFT,
+                 __test_bit_size >> XC_PAGE_SHIFT,
                  entry->valid_mapping))) {
         pentry = entry;
         entry = entry->next;
     }
@@ -241,13 +257,15 @@ tryagain:
     } else if (!entry->lock) {
         if (!entry->vaddr_base || entry->paddr_index != address_index ||
             entry->size != __size ||
-            !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+            !test_bits(address_offset >> XC_PAGE_SHIFT,
+                __test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping)) {
             xen_remap_bucket(entry, __size, address_index);
         }
     }
 
-    if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+    if(!test_bits(address_offset >> XC_PAGE_SHIFT,
+                __test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping)) {
         mapcache->last_entry = NULL;
         if (!translated && mapcache->phys_offset_to_gaddr) {