author    | Daniel Henrique Barboza <danielhb413@gmail.com> | 2021-01-28 14:42:12 -0300
committer | David Gibson <david@gibson.dropbear.id.au>      | 2021-02-10 10:43:50 +1100
commit    | 6640706972c50aac4f620d7385d4e228a118e289
tree      | 3476fe9141e5c816d47b12814b4bd7e81786c0ac /hw/ppc
parent    | 3b880445e61b6509a9a5b4236eaf07718ae4a51a
spapr_numa.c: create spapr_numa_initial_nvgpu_numa_id() helper
We'll need to check the initial value given to spapr->gpu_numa_id when
building the rtas DT, so put it in a helper for easier access and to
avoid repetition.
Tested-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20210128174213.1349181-3-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Diffstat (limited to 'hw/ppc')
-rw-r--r-- | hw/ppc/spapr.c | 11 |
-rw-r--r-- | hw/ppc/spapr_numa.c | 14 |
2 files changed, 15 insertions, 10 deletions
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 8a1a979257..85fe65f894 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -2770,16 +2770,7 @@ static void spapr_machine_init(MachineState *machine)
     }
 
-    /*
-     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
-     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
-     * called from vPHB reset handler so we initialize the counter here.
-     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
-     * must be equally distant from any other node.
-     * The final value of spapr->gpu_numa_id is going to be written to
-     * max-associativity-domains in spapr_build_fdt().
-     */
-    spapr->gpu_numa_id = MAX(1, machine->numa_state->num_nodes);
+    spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine);
 
     /* Init numa_assoc_array */
     spapr_numa_associativity_init(spapr, machine);
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 261810525b..a757dd88b8 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -47,6 +47,20 @@ static bool spapr_numa_is_symmetrical(MachineState *ms)
 }
 
 /*
+ * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
+ * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
+ * called from vPHB reset handler so we initialize the counter here.
+ * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
+ * must be equally distant from any other node.
+ * The final value of spapr->gpu_numa_id is going to be written to
+ * max-associativity-domains in spapr_build_fdt().
+ */
+unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine)
+{
+    return MAX(1, machine->numa_state->num_nodes);
+}
+
+/*
  * This function will translate the user distances into
  * what the kernel understand as possible values: 10
  * (local distance), 20, 40, 80 and 160, and return the equivalent
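
For context, a minimal sketch of how the rtas DT build path mentioned in the commit message might check the initial value against the final spapr->gpu_numa_id. This is not part of the patch: the function example_nvgpu_numa_nodes below is hypothetical, and the declaration of spapr_numa_initial_nvgpu_numa_id() is assumed to live in "hw/ppc/spapr_numa.h" (outside the hw/ppc diffstat shown here).

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_numa.h"

/*
 * Illustrative only: the helper returns the value spapr->gpu_numa_id
 * starts from, so comparing against the current counter tells a caller
 * how many extra NUMA nodes were added for NVGPUs by
 * spapr_pci_collect_nvgpu().  The function name is made up for this sketch.
 */
static unsigned int example_nvgpu_numa_nodes(SpaprMachineState *spapr,
                                             MachineState *machine)
{
    unsigned int initial = spapr_numa_initial_nvgpu_numa_id(machine);

    /* spapr->gpu_numa_id was incremented once per collected NVGPU */
    return spapr->gpu_numa_id - initial;
}

Centralizing the MAX(1, num_nodes) calculation in spapr_numa.c means any such caller and spapr_machine_init() share one definition of the starting NVGPU NUMA id instead of repeating it, which is the "avoid repetition" point of the commit message.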