path: root/hw/arm
author    Igor Mammedov <imammedo@redhat.com>    2017-05-10 13:29:45 +0200
committer Eduardo Habkost <ehabkost@redhat.com>  2017-05-11 16:08:48 -0300
commit    ea089eebbd80e61d3c3cd03741dd5d9535c551fc (patch)
tree      f868c4e6bc6f311b8513aaf465da5f343f96d6db /hw/arm
parent    d9c34f9c6c745f6a8123a872b33990ab564c614a (diff)
numa: move source of default CPUs to NUMA node mapping into boards
Originally, CPU threads were by default assigned to NUMA nodes in round-robin fashion. However, this was causing issues in the guest, since CPU threads from the same socket/core could be placed on different NUMA nodes. Commit fb43b73b (pc: fix default VCPU to NUMA node mapping) fixed it by grouping threads within a socket on the same node, introducing the cpu_index_to_socket_id() callback, and commit 20bb648d (spapr: Fix default NUMA node allocation for threads) reused the callback to fix similar issues for the SPAPR machine, even though a socket doesn't make much sense there.

As a result, QEMU ended up with 3 default distribution rules used by 3 targets (virt-arm, spapr, pc).

In an effort to move the NUMA mapping for CPUs into possible_cpus, generalize the default mapping in numa.c by making boards decide on the default mapping and letting them explicitly tell the generic NUMA code which node a CPU thread belongs to, replacing cpu_index_to_socket_id() with cpu_index_to_instance_props(), which provides the default node_id assigned by the board to the specified cpu_index.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <1494415802-227633-2-git-send-email-imammedo@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
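As a rough illustration of the approach described above, here is a minimal, self-contained sketch (not QEMU source) of how generic NUMA init code could consume a board-provided callback of this kind. The type CpuInstanceProps, the function board_cpu_index_to_props, and the example configuration values are hypothetical stand-ins; only the round-robin node_id = cpu_index % nb_numa_nodes default mirrors what the virt board diff below actually does.

/*
 * Hypothetical, simplified sketch of a board callback that reports the
 * default CPU -> NUMA node mapping, and of generic code applying that
 * preset when no explicit mapping was given on the command line.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool has_node_id;
    int  node_id;
} CpuInstanceProps;              /* stand-in for QEMU's CpuInstanceProperties */

/* stand-in for the board callback; a real board would fill this from its
 * possible_cpus list, as hw/arm/virt.c does in the diff below */
static CpuInstanceProps board_cpu_index_to_props(unsigned cpu_index,
                                                 int nb_numa_nodes)
{
    CpuInstanceProps props = {
        .has_node_id = false,                  /* preset, not yet enabled */
        .node_id = cpu_index % nb_numa_nodes,  /* board default: round-robin */
    };
    return props;
}

int main(void)
{
    const int nb_numa_nodes = 2;       /* assumed example configuration */
    const unsigned smp_cpus = 4;

    for (unsigned i = 0; i < smp_cpus; i++) {
        CpuInstanceProps p = board_cpu_index_to_props(i, nb_numa_nodes);
        /* generic code would enable the preset only if the user did not
         * supply an explicit CPU -> node mapping on the CLI */
        p.has_node_id = true;
        printf("cpu %u -> node %d\n", i, p.node_id);
    }
    return 0;
}

The point of the split is that numa.c no longer has to know about sockets, cores, or any target-specific topology rule: it just asks the board for per-cpu_index properties and enables the preset node_id when the user has not overridden it.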
Diffstat (limited to 'hw/arm')
-rw-r--r--   hw/arm/virt.c   20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index acc748ec40..dfd6fd446c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1539,6 +1539,16 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
}
}
+static CpuInstanceProperties
+virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+
+ assert(cpu_index < possible_cpus->len);
+ return possible_cpus->cpus[cpu_index].props;
+}
+
static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
{
int n;
@@ -1558,8 +1568,13 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
ms->possible_cpus->cpus[n].props.has_thread_id = true;
ms->possible_cpus->cpus[n].props.thread_id = n;
- /* TODO: add 'has_node/node' here to describe
- to which node core belongs */
+ /* default distribution of CPUs over NUMA nodes */
+ if (nb_numa_nodes) {
+ /* preset values but do not enable them i.e. 'has_node_id = false',
+ * numa init code will enable them later if manual mapping wasn't
+ * present on CLI */
+ ms->possible_cpus->cpus[n].props.node_id = n % nb_numa_nodes;
+ }
}
return ms->possible_cpus;
}
@@ -1581,6 +1596,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
/* We know we will never create a pre-ARMv7 CPU which needs 1K pages */
mc->minimum_page_bits = 12;
mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids;
+ mc->cpu_index_to_instance_props = virt_cpu_index_to_props;
}
static const TypeInfo virt_machine_info = {
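As a worked example of the preset introduced above (not taken from the patch itself): with 4 CPUs and two NUMA nodes on the virt board, node_id = n % nb_numa_nodes places CPUs 0 and 2 on node 0 and CPUs 1 and 3 on node 1; because has_node_id is left false here, this default only takes effect if the NUMA init code later enables it, i.e. when no explicit mapping was given on the command line.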