author     Umesh Deshpande <udeshpan@redhat.com>      2011-08-17 00:01:33 -0700
committer  Juan Quintela <quintela@redhat.com>        2012-12-20 23:08:47 +0100
commit     b2a8658ef5dc57ea9e7a45091724a719dd4bdcd3 (patch)
tree       0ec9f7220ae6bc55adaca771b7ac0f144dd14e7f /exec.c
parent     f798b07f517143df3a1e38bccc3f72ade2f080dc (diff)
protect the ramlist with a separate mutex
Add the new mutex that protects shared state between ram_save_live
and the iothread.  If the iothread mutex has to be taken together
with the ramlist mutex, the iothread shall always be _outside_.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Umesh Deshpande <udeshpan@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
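The lock-ordering rule stated above can be illustrated with a small,
self-contained sketch.  The code below is not QEMU code: it uses plain
pthreads, and the names big_lock (standing in for the iothread mutex) and
ramlist_lock (standing in for ram_list.mutex) are made up for illustration.
It only shows the convention that whenever both locks are needed, the outer
(iothread) lock is acquired first and released last, while a reader such as
the migration thread takes only the inner lock.

/*
 * Standalone sketch of the lock ordering described above (not QEMU code).
 * Assumed names: "big_lock" plays the role of the iothread mutex,
 * "ramlist_lock" plays the role of ram_list.mutex.
 * Rule: if both are held, big_lock is always the outer one.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;      /* "iothread" */
static pthread_mutex_t ramlist_lock = PTHREAD_MUTEX_INITIALIZER;  /* "ram_list.mutex" */

/* Caller already holds big_lock: only the inner lock is taken here,
 * mirroring how the writers in this patch nest the ramlist mutex. */
static void modify_ram_list_locked(void)
{
    pthread_mutex_lock(&ramlist_lock);
    /* ... add/remove a block, bump a version counter ... */
    pthread_mutex_unlock(&ramlist_lock);
}

/* Entry point that owns neither lock: take the outer lock first,
 * then the inner one, never the other way around. */
static void modify_ram_list(void)
{
    pthread_mutex_lock(&big_lock);
    modify_ram_list_locked();
    pthread_mutex_unlock(&big_lock);
}

/* A reader (the migration thread in the commit message) only needs the
 * inner lock, so it can walk the list without holding the "iothread". */
static void *walk_ram_list(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&ramlist_lock);
    /* ... iterate over the block list ... */
    pthread_mutex_unlock(&ramlist_lock);
    return NULL;
}

int main(void)
{
    pthread_t reader;
    pthread_create(&reader, NULL, walk_ram_list, NULL);
    modify_ram_list();
    pthread_join(reader, NULL);
    puts("done");
    return 0;
}

Build with "cc -pthread".  Keeping the ramlist mutex strictly inside the
iothread lock is what rules out an ABBA deadlock between the two threads.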
Diffstat (limited to 'exec.c')
-rw-r--r--   exec.c   29 +++++++++++++++++++++++++++--
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/exec.c b/exec.c
index 8478bef548..a6923addd4 100644
--- a/exec.c
+++ b/exec.c
@@ -213,6 +213,7 @@ bool memory_region_is_unassigned(MemoryRegion *mr)
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
+ qemu_mutex_init(&ram_list.mutex);
memory_map_init();
io_mem_init();
#endif
@@ -801,6 +802,16 @@ void qemu_flush_coalesced_mmio_buffer(void)
kvm_flush_coalesced_mmio_buffer();
}
+void qemu_mutex_lock_ramlist(void)
+{
+ qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+ qemu_mutex_unlock(&ram_list.mutex);
+}
+
#if defined(__linux__) && !defined(TARGET_S390X)
#include <sys/vfs.h>
@@ -982,6 +993,8 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
@@ -989,6 +1002,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
abort();
}
}
+ qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
@@ -1012,6 +1026,8 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
size = TARGET_PAGE_ALIGN(size);
new_block = g_malloc0(sizeof(*new_block));
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
new_block->mr = mr;
new_block->offset = find_ram_offset(size);
if (host) {
@@ -1057,6 +1073,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
ram_list.mru_block = NULL;
ram_list.version++;
+ qemu_mutex_unlock_ramlist();
ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
@@ -1082,21 +1099,26 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
{
RAMBlock *block;
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QTAILQ_REMOVE(&ram_list.blocks, block, next);
ram_list.mru_block = NULL;
ram_list.version++;
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
RAMBlock *block;
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QTAILQ_REMOVE(&ram_list.blocks, block, next);
@@ -1127,9 +1149,10 @@ void qemu_ram_free(ram_addr_t addr)
#endif
}
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
@@ -1207,6 +1230,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
{
RAMBlock *block;
+ /* The list is protected by the iothread lock here. */
block = ram_list.mru_block;
if (block && addr - block->offset < block->length) {
goto found;
@@ -1246,6 +1270,7 @@ static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
RAMBlock *block;
+ /* The list is protected by the iothread lock here. */
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
if (xen_enabled()) {