Diffstat (limited to 'hw/arm/virt.c')
-rw-r--r--  hw/arm/virt.c  123
1 file changed, 101 insertions(+), 22 deletions(-)
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 5f62a0321e..c7c8159dfd 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -338,7 +338,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
{
int cpu;
int addr_cells = 1;
- unsigned int i;
+ const MachineState *ms = MACHINE(vms);
/*
* From Documentation/devicetree/bindings/arm/cpus.txt
@@ -369,6 +369,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
for (cpu = vms->smp_cpus - 1; cpu >= 0; cpu--) {
char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
+ CPUState *cs = CPU(armcpu);
qemu_fdt_add_subnode(vms->fdt, nodename);
qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "cpu");
@@ -389,9 +390,9 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
armcpu->mp_affinity);
}
- i = numa_get_node_for_cpu(cpu);
- if (i < nb_numa_nodes) {
- qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id", i);
+ if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id",
+ ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
}
g_free(nodename);
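
Note: the hunk above replaces the old numa_get_node_for_cpu() lookup with the per-CPU properties kept in MachineState::possible_cpus. A minimal standalone sketch of the new decision follows (the helper name is hypothetical, not part of the patch; qemu_fdt_setprop_cell() is the same helper the patch uses):

    /* Hypothetical helper: emit "numa-node-id" for a cpu node only when a
     * NUMA mapping was actually established for that CPU. */
    static void fdt_set_cpu_numa_node(void *fdt, const char *nodename,
                                      const MachineState *ms, int cpu_index)
    {
        const CpuInstanceProperties *props =
            &ms->possible_cpus->cpus[cpu_index].props;

        if (props->has_node_id) {
            qemu_fdt_setprop_cell(fdt, nodename, "numa-node-id",
                                  props->node_id);
        }
    }
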
@@ -1194,10 +1195,35 @@ void virt_machine_done(Notifier *notifier, void *data)
virt_build_smbios(vms);
}
+static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
+{
+ uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
+ VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
+
+ if (!vmc->disallow_affinity_adjustment) {
+ /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the
+ * GIC's target-list limitations. 32-bit KVM hosts currently
+ * always create clusters of 4 CPUs, but that is expected to
+ * change when they gain support for gicv3. When KVM is enabled
+ * it will override the changes we make here, therefore our
+ * purposes are to make TCG consistent (with 64-bit KVM hosts)
+ * and to improve SGI efficiency.
+ */
+ if (vms->gic_version == 3) {
+ clustersz = GICV3_TARGETLIST_BITS;
+ } else {
+ clustersz = GIC_TARGETLIST_BITS;
+ }
+ }
+ return arm_cpu_mp_affinity(idx, clustersz);
+}
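
Note: a worked example of the packing, assuming arm_cpu_mp_affinity() in target/arm splits the linear index as sketched below (with ARM_AFF1_SHIFT being 8). With a GICv3, clustersz is GICV3_TARGETLIST_BITS (16), so CPU index 18 becomes Aff1 = 1, Aff0 = 2, i.e. an MPIDR affinity of 0x102:

    /* Sketch of the arithmetic assumed to sit behind arm_cpu_mp_affinity(). */
    static uint64_t mp_affinity_sketch(int idx, uint8_t clustersz)
    {
        uint64_t aff1 = idx / clustersz;  /* cluster number */
        uint64_t aff0 = idx % clustersz;  /* CPU within the cluster */
        return (aff1 << ARM_AFF1_SHIFT) | aff0;  /* 18 -> 0x102 for GICv3 */
    }
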
+
static void machvirt_init(MachineState *machine)
{
VirtMachineState *vms = VIRT_MACHINE(machine);
VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(machine);
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ const CPUArchIdList *possible_cpus;
qemu_irq pic[NUM_IRQS];
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *secure_sysmem = NULL;
@@ -1210,7 +1236,6 @@ static void machvirt_init(MachineState *machine)
CPUClass *cc;
Error *err = NULL;
bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0);
- uint8_t clustersz;
if (!cpu_model) {
cpu_model = "cortex-a15";
@@ -1263,10 +1288,8 @@ static void machvirt_init(MachineState *machine)
*/
if (vms->gic_version == 3) {
virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
- clustersz = GICV3_TARGETLIST_BITS;
} else {
virt_max_cpus = GIC_NCPU;
- clustersz = GIC_TARGETLIST_BITS;
}
if (max_cpus > virt_max_cpus) {
@@ -1324,21 +1347,35 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
- for (n = 0; n < smp_cpus; n++) {
- Object *cpuobj = object_new(typename);
- if (!vmc->disallow_affinity_adjustment) {
- /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the
- * GIC's target-list limitations. 32-bit KVM hosts currently
- * always create clusters of 4 CPUs, but that is expected to
- * change when they gain support for gicv3. When KVM is enabled
- * it will override the changes we make here, therefore our
- * purposes are to make TCG consistent (with 64-bit KVM hosts)
- * and to improve SGI efficiency.
- */
- uint8_t aff1 = n / clustersz;
- uint8_t aff0 = n % clustersz;
- object_property_set_int(cpuobj, (aff1 << ARM_AFF1_SHIFT) | aff0,
- "mp-affinity", NULL);
+ possible_cpus = mc->possible_cpu_arch_ids(machine);
+ for (n = 0; n < possible_cpus->len; n++) {
+ Object *cpuobj;
+ CPUState *cs;
+ int node_id;
+
+ if (n >= smp_cpus) {
+ break;
+ }
+
+ cpuobj = object_new(typename);
+ object_property_set_int(cpuobj, possible_cpus->cpus[n].arch_id,
+ "mp-affinity", NULL);
+
+ cs = CPU(cpuobj);
+ cs->cpu_index = n;
+
+ node_id = possible_cpus->cpus[cs->cpu_index].props.node_id;
+ if (!possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
+ /* By default CPUState::numa_node is 0 if it was not set via the CLI;
+ * keep it this way for now, but in the future we should probably
+ * refuse to start up with an incomplete NUMA mapping. */
+ node_id = 0;
+ }
+ if (cs->numa_node == CPU_UNSET_NUMA_NODE_ID) {
+ cs->numa_node = node_id;
+ } else {
+ /* CPU isn't device_add compatible yet, so this shouldn't happen */
+ error_setg(&error_abort, "user set node-id not implemented");
}
if (!vms->secure) {
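
Note: the has_node_id fallback above only triggers for CPUs that were left out of the -numa options; they are placed on node 0 rather than rejected, as the new comment says. A mapping that covers every CPU would be given on the command line like this (a 4-CPU example):

    qemu-system-aarch64 -M virt -smp 4 \
        -numa node,cpus=0-1 -numa node,cpus=2-3

The error_setg(&error_abort, ...) branch is deliberately fatal: this board's CPUs cannot be created with device_add yet, so a pre-set cs->numa_node should be impossible.
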
@@ -1518,6 +1555,46 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
}
}
+static CpuInstanceProperties
+virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+
+ assert(cpu_index < possible_cpus->len);
+ return possible_cpus->cpus[cpu_index].props;
+}
+
+static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
+{
+ int n;
+ VirtMachineState *vms = VIRT_MACHINE(ms);
+
+ if (ms->possible_cpus) {
+ assert(ms->possible_cpus->len == max_cpus);
+ return ms->possible_cpus;
+ }
+
+ ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * max_cpus);
+ ms->possible_cpus->len = max_cpus;
+ for (n = 0; n < ms->possible_cpus->len; n++) {
+ ms->possible_cpus->cpus[n].arch_id =
+ virt_cpu_mp_affinity(vms, n);
+ ms->possible_cpus->cpus[n].props.has_thread_id = true;
+ ms->possible_cpus->cpus[n].props.thread_id = n;
+
+ /* default distribution of CPUs over NUMA nodes */
+ if (nb_numa_nodes) {
+ /* Preset the values but do not enable them (i.e. 'has_node_id = false');
+ * the NUMA init code will enable them later if no manual mapping was
+ * given on the CLI. */
+ ms->possible_cpus->cpus[n].props.node_id = n % nb_numa_nodes;
+ }
+ }
+ return ms->possible_cpus;
+}
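
Note: a standalone illustration (not QEMU code) of the round-robin preset above. With a max_cpus of 4 and two -numa nodes, CPUs 0 and 2 land on node 0 and CPUs 1 and 3 on node 1, while has_node_id stays false until the NUMA init code confirms the mapping:

    #include <stdio.h>

    int main(void)
    {
        const int max_cpus = 4, nb_numa_nodes = 2;  /* example values */

        for (int n = 0; n < max_cpus; n++) {
            printf("cpu %d -> node %d\n", n, n % nb_numa_nodes);
        }
        return 0;
    }
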
+
static void virt_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1534,6 +1611,8 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
mc->pci_allow_0_address = true;
/* We know we will never create a pre-ARMv7 CPU which needs 1K pages */
mc->minimum_page_bits = 12;
+ mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids;
+ mc->cpu_index_to_instance_props = virt_cpu_index_to_props;
}
static const TypeInfo virt_machine_info = {