author    Philippe Mathieu-Daudé <philmd@linaro.org>    2023-06-21 13:15:27 +0200
committer Philippe Mathieu-Daudé <philmd@linaro.org>    2023-06-28 14:14:22 +0200
commit 3b295bcb3289afec09508786032f4ba5d657a934
tree   37f11b0092601dd6303ab87a3b5c0fb2da713914 /target/arm
parent a7159244285058c049ad53a42b3dc7b24809faaa
accel: Rename HVF 'struct hvf_vcpu_state' -> AccelCPUState
We want all accelerators to share the same opaque pointer in
CPUState. Rename the 'hvf_vcpu_state' structure as 'AccelCPUState'.
Use the generic 'accel' field of CPUState instead of 'hvf'.

Replace g_malloc0() by g_new0() for readability.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20230624174121.11508-17-philmd@linaro.org>
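A minimal sketch of the pattern this commit applies (the CPUState field change and the allocation site live outside this diff; the stand-in struct definitions below are illustrative only, the real ones are in QEMU's headers and carry many more fields):

```c
#include <glib.h>

/* Illustrative stand-ins -- NOT the real QEMU definitions. */
typedef struct AccelCPUState { int fd; } AccelCPUState;
typedef struct CPUState { AccelCPUState *accel; } CPUState;

static void init_accel_state(CPUState *cpu)
{
    /* Old style (per-accelerator field, untyped allocation):
     *     cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
     * New style: one generic 'accel' pointer shared by all
     * accelerators, allocated type-safely with g_new0(). */
    cpu->accel = g_new0(AccelCPUState, 1);
}
```

Every per-vCPU HVF access in the diff below then changes mechanically from `cpu->hvf-><field>` to `cpu->accel-><field>`, which is why the diffstat is a symmetric 54 insertions / 54 deletions.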
Diffstat (limited to 'target/arm')
-rw-r--r--    target/arm/hvf/hvf.c    108
1 file changed, 54 insertions(+), 54 deletions(-)
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 8f72624586..8fce64bbf6 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -544,29 +544,29 @@ int hvf_get_registers(CPUState *cpu)
int i;
for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
- ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
*(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
assert_hvf_ok(ret);
}
for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
- ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+ ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
&fpval);
memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
assert_hvf_ok(ret);
}
val = 0;
- ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
assert_hvf_ok(ret);
vfp_set_fpcr(env, val);
val = 0;
- ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
assert_hvf_ok(ret);
vfp_set_fpsr(env, val);
- ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
assert_hvf_ok(ret);
pstate_write(env, val);
@@ -575,7 +575,7 @@ int hvf_get_registers(CPUState *cpu)
continue;
}
- if (cpu->hvf->guest_debug_enabled) {
+ if (cpu->accel->guest_debug_enabled) {
/* Handle debug registers */
switch (hvf_sreg_match[i].reg) {
case HV_SYS_REG_DBGBVR0_EL1:
@@ -661,7 +661,7 @@ int hvf_get_registers(CPUState *cpu)
}
}
- ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
+ ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
assert_hvf_ok(ret);
arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
@@ -684,24 +684,24 @@ int hvf_put_registers(CPUState *cpu)
for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
- ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
+ ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
assert_hvf_ok(ret);
}
for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
- ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+ ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
fpval);
assert_hvf_ok(ret);
}
- ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
+ ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
assert_hvf_ok(ret);
- ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
+ ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
assert_hvf_ok(ret);
- ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
+ ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
assert_hvf_ok(ret);
aarch64_save_sp(env, arm_current_el(env));
@@ -712,7 +712,7 @@ int hvf_put_registers(CPUState *cpu)
continue;
}
- if (cpu->hvf->guest_debug_enabled) {
+ if (cpu->accel->guest_debug_enabled) {
/* Handle debug registers */
switch (hvf_sreg_match[i].reg) {
case HV_SYS_REG_DBGBVR0_EL1:
@@ -789,11 +789,11 @@ int hvf_put_registers(CPUState *cpu)
}
val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
assert_hvf_ok(ret);
}
- ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
+ ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
assert_hvf_ok(ret);
return 0;
@@ -814,7 +814,7 @@ static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
flush_cpu_state(cpu);
if (rt < 31) {
- r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
+ r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
assert_hvf_ok(r);
}
}
@@ -827,7 +827,7 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
flush_cpu_state(cpu);
if (rt < 31) {
- r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
+ r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
assert_hvf_ok(r);
}
@@ -969,22 +969,22 @@ int hvf_arch_init_vcpu(CPUState *cpu)
assert(write_cpustate_to_list(arm_cpu, false));
/* Set CP_NO_RAW system registers on init */
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
arm_cpu->midr);
assert_hvf_ok(ret);
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
arm_cpu->mp_affinity);
assert_hvf_ok(ret);
- ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
+ ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
assert_hvf_ok(ret);
pfr |= env->gicv3state ? (1 << 24) : 0;
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
assert_hvf_ok(ret);
/* We're limited to underlying hardware caps, override internal versions */
- ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
+ ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
&arm_cpu->isar.id_aa64mmfr0);
assert_hvf_ok(ret);
@@ -994,7 +994,7 @@ int hvf_arch_init_vcpu(CPUState *cpu)
void hvf_kick_vcpu_thread(CPUState *cpu)
{
cpus_kick_thread(cpu);
- hv_vcpus_exit(&cpu->hvf->fd, 1);
+ hv_vcpus_exit(&cpu->accel->fd, 1);
}
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
@@ -1678,13 +1678,13 @@ static int hvf_inject_interrupts(CPUState *cpu)
{
if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
trace_hvf_inject_fiq();
- hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
+ hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
true);
}
if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
trace_hvf_inject_irq();
- hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
+ hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
true);
}
@@ -1718,7 +1718,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
*/
qatomic_set_mb(&cpu->thread_kicked, false);
qemu_mutex_unlock_iothread();
- pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
+ pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
qemu_mutex_lock_iothread();
}
@@ -1739,7 +1739,7 @@ static void hvf_wfi(CPUState *cpu)
return;
}
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+ r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
assert_hvf_ok(r);
if (!(ctl & 1) || (ctl & 2)) {
@@ -1748,7 +1748,7 @@ static void hvf_wfi(CPUState *cpu)
return;
}
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
+ r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
assert_hvf_ok(r);
ticks_to_sleep = cval - hvf_vtimer_val();
@@ -1781,12 +1781,12 @@ static void hvf_sync_vtimer(CPUState *cpu)
uint64_t ctl;
bool irq_state;
- if (!cpu->hvf->vtimer_masked) {
+ if (!cpu->accel->vtimer_masked) {
/* We will get notified on vtimer changes by hvf, nothing to do */
return;
}
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+ r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
assert_hvf_ok(r);
irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
@@ -1795,8 +1795,8 @@ static void hvf_sync_vtimer(CPUState *cpu)
if (!irq_state) {
/* Timer no longer asserting, we can unmask it */
- hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
- cpu->hvf->vtimer_masked = false;
+ hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
+ cpu->accel->vtimer_masked = false;
}
}
@@ -1805,7 +1805,7 @@ int hvf_vcpu_exec(CPUState *cpu)
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
int ret;
- hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
+ hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
hv_return_t r;
bool advance_pc = false;
@@ -1821,7 +1821,7 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
qemu_mutex_unlock_iothread();
- assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
+ assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
/* handle VMEXIT */
uint64_t exit_reason = hvf_exit->reason;
@@ -1836,7 +1836,7 @@ int hvf_vcpu_exec(CPUState *cpu)
break;
case HV_EXIT_REASON_VTIMER_ACTIVATED:
qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
- cpu->hvf->vtimer_masked = true;
+ cpu->accel->vtimer_masked = true;
return 0;
case HV_EXIT_REASON_CANCELED:
/* we got kicked, no exit to process */
@@ -1990,10 +1990,10 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
- r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
+ r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
assert_hvf_ok(r);
pc += 4;
- r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
+ r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
assert_hvf_ok(r);
/* Handle single-stepping over instructions which trigger a VM exit */
@@ -2113,29 +2113,29 @@ static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
for (i = 0; i < cur_hw_bps; i++) {
HWBreakpoint *bp = get_hw_bp(i);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], bp->bcr);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
assert_hvf_ok(r);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], bp->bvr);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
assert_hvf_ok(r);
}
for (i = cur_hw_bps; i < max_hw_bps; i++) {
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], 0);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
assert_hvf_ok(r);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], 0);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
assert_hvf_ok(r);
}
for (i = 0; i < cur_hw_wps; i++) {
HWWatchpoint *wp = get_hw_wp(i);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], wp->wcr);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
assert_hvf_ok(r);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], wp->wvr);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
assert_hvf_ok(r);
}
for (i = cur_hw_wps; i < max_hw_wps; i++) {
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], 0);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
assert_hvf_ok(r);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], 0);
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
assert_hvf_ok(r);
}
}
@@ -2152,19 +2152,19 @@ static void hvf_put_guest_debug_registers(CPUState *cpu)
int i;
for (i = 0; i < max_hw_bps; i++) {
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i],
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
env->cp15.dbgbcr[i]);
assert_hvf_ok(r);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i],
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
env->cp15.dbgbvr[i]);
assert_hvf_ok(r);
}
for (i = 0; i < max_hw_wps; i++) {
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i],
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
env->cp15.dbgwcr[i]);
assert_hvf_ok(r);
- r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i],
+ r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
env->cp15.dbgwvr[i]);
assert_hvf_ok(r);
}
@@ -2184,16 +2184,16 @@ static void hvf_arch_set_traps(void)
/* Check whether guest debugging is enabled for at least one vCPU; if it
* is, enable exiting the guest on all vCPUs */
CPU_FOREACH(cpu) {
- should_enable_traps |= cpu->hvf->guest_debug_enabled;
+ should_enable_traps |= cpu->accel->guest_debug_enabled;
}
CPU_FOREACH(cpu) {
/* Set whether debug exceptions exit the guest */
- r = hv_vcpu_set_trap_debug_exceptions(cpu->hvf->fd,
+ r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
should_enable_traps);
assert_hvf_ok(r);
/* Set whether accesses to debug registers exit the guest */
- r = hv_vcpu_set_trap_debug_reg_accesses(cpu->hvf->fd,
+ r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
should_enable_traps);
assert_hvf_ok(r);
}
@@ -2205,12 +2205,12 @@ void hvf_arch_update_guest_debug(CPUState *cpu)
CPUARMState *env = &arm_cpu->env;
/* Check whether guest debugging is enabled */
- cpu->hvf->guest_debug_enabled = cpu->singlestep_enabled ||
+ cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
hvf_sw_breakpoints_active(cpu) ||
hvf_arm_hw_debug_active(cpu);
/* Update debug registers */
- if (cpu->hvf->guest_debug_enabled) {
+ if (cpu->accel->guest_debug_enabled) {
hvf_put_gdbstub_debug_registers(cpu);
} else {
hvf_put_guest_debug_registers(cpu);