author     Igor Mammedov <imammedo@redhat.com>      2016-07-27 11:24:54 +0200
committer  Eduardo Habkost <ehabkost@redhat.com>    2016-09-05 16:01:55 -0300
commit     630eb0faf4d3c04a117a047247691e0654632aa8 (patch)
tree       b566b85964ad0ed05ca0ab5d25560327d7a4a54b /exec.c
parent     e87d397e5ef66276ccc49b829527d605ca07d0ad (diff)
exec: Ensure only one cpu_index allocation method is used
Make sure that cpu_index auto allocation isn't used in combination with manual cpu_index assignment, and disallow out-of-order CPU removal while auto allocation is in use. A target that wishes to support out-of-order unplug should switch to manual cpu_index assignment. The following patch could be used as an example:
    (pc: init CPUState->cpu_index with index in possible_cpus[])

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
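The invariant the patch enforces is easier to see in isolation. Below is a minimal standalone sketch, not QEMU code: ToyCPU, the fixed-size array and the toy_* helpers are simplified stand-ins for CPUState, the cpus tail queue and cpu_exec_init()/cpu_exec_exit(). It models the same rule: once any cpu_index has been auto-assigned by counting the CPU list, manually assigned indexes are rejected and only the last CPU in the list may be removed.

    /* Standalone sketch of the cpu_index invariant (not QEMU code). */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define UNASSIGNED_CPU_INDEX (-1)
    #define MAX_CPUS 8

    typedef struct {
        int cpu_index;
    } ToyCPU;

    /* Stand-in for the cpus tail queue: CPUs in insertion order. */
    static ToyCPU *cpus[MAX_CPUS];
    static int ncpus;
    static bool cpu_index_auto_assigned;

    static int cpu_get_free_index(void)
    {
        cpu_index_auto_assigned = true;
        return ncpus;                 /* mirrors counting CPU_FOREACH entries */
    }

    static void toy_cpu_exec_init(ToyCPU *cpu)
    {
        if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
            cpu->cpu_index = cpu_get_free_index();
        } else {
            /* Manually assigned indexes may not be mixed with auto ones. */
            assert(!cpu_index_auto_assigned);
        }
        cpus[ncpus++] = cpu;
    }

    static void toy_cpu_exec_exit(ToyCPU *cpu)
    {
        /* With auto allocation only the last CPU in the list may leave. */
        assert(!(cpu_index_auto_assigned && cpu != cpus[ncpus - 1]));
        for (int i = 0; i < ncpus; i++) {
            if (cpus[i] == cpu) {
                for (int j = i; j < ncpus - 1; j++) {
                    cpus[j] = cpus[j + 1];   /* close the gap, like QTAILQ_REMOVE */
                }
                ncpus--;
                break;
            }
        }
        cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    }

    int main(void)
    {
        ToyCPU a = { .cpu_index = UNASSIGNED_CPU_INDEX };
        ToyCPU b = { .cpu_index = UNASSIGNED_CPU_INDEX };

        toy_cpu_exec_init(&a);        /* auto: a gets index 0 */
        toy_cpu_exec_init(&b);        /* auto: b gets index 1 */
        toy_cpu_exec_exit(&b);        /* allowed: b is the last CPU in the list */
        /* Removing a before b would have tripped the assert, as would
         * initializing a CPU with a preset cpu_index at this point. */
        printf("auto-assigned cpu_index invariant holds\n");
        return 0;
    }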
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c | 7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/exec.c b/exec.c
index 8ffde75983..80398b038f 100644
--- a/exec.c
+++ b/exec.c
@@ -598,11 +598,14 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
}
#endif
+static bool cpu_index_auto_assigned;
+
static int cpu_get_free_index(void)
{
CPUState *some_cpu;
int cpu_index = 0;
+ cpu_index_auto_assigned = true;
CPU_FOREACH(some_cpu) {
cpu_index++;
}
@@ -620,6 +623,8 @@ void cpu_exec_exit(CPUState *cpu)
return;
}
+ assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));
+
QTAILQ_REMOVE(&cpus, cpu, node);
cpu->node.tqe_prev = NULL;
cpu->cpu_index = UNASSIGNED_CPU_INDEX;
@@ -663,6 +668,8 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
cpu->cpu_index = cpu_get_free_index();
assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
+ } else {
+ assert(!cpu_index_auto_assigned);
}
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
cpu_list_unlock();
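For contrast, the manual path the commit message recommends for out-of-order unplug (the referenced pc follow-up derives the index from possible_cpus[]) can be pictured by extending the toy model above: the board assigns a stable cpu_index before calling the init helper, so cpu_index_auto_assigned never becomes true and the new assertions no longer constrain removal order. The board-level loop below is purely illustrative and reuses the toy types and helpers from the earlier sketch.

    /* Hypothetical board code, continuing the toy model above: manual,
     * topology-stable cpu_index values assigned before init. Valid only
     * if auto allocation was never used for any other CPU. */
    static void toy_board_plug_cpus(ToyCPU cpu[], int n)
    {
        for (int i = 0; i < n; i++) {
            cpu[i].cpu_index = i;        /* manual slot, e.g. taken from possible_cpus[] */
            toy_cpu_exec_init(&cpu[i]);  /* takes the else branch; assert passes */
        }
        /* cpu_index_auto_assigned stays false, so toy_cpu_exec_exit()
         * accepts removal of any CPU, not only the last one in the list. */
    }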