Diffstat (limited to 'hw')
-rw-r--r--  hw/mips/mips_int.c   12
-rw-r--r--  hw/misc/mips_cpc.c   17
2 files changed, 27 insertions, 2 deletions
diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c
index 48192d22f3..5ddeb15848 100644
--- a/hw/mips/mips_int.c
+++ b/hw/mips/mips_int.c
@@ -21,6 +21,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/main-loop.h"
 #include "hw/hw.h"
 #include "hw/mips/cpudevs.h"
 #include "cpu.h"
@@ -32,10 +33,17 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
     MIPSCPU *cpu = opaque;
     CPUMIPSState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
+    bool locked = false;
 
     if (irq < 0 || irq > 7)
         return;
 
+    /* Make sure locking works even if BQL is already held by the caller */
+    if (!qemu_mutex_iothread_locked()) {
+        locked = true;
+        qemu_mutex_lock_iothread();
+    }
+
     if (level) {
         env->CP0_Cause |= 1 << (irq + CP0Ca_IP);
 
@@ -56,6 +64,10 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
     } else {
         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
     }
+
+    if (locked) {
+        qemu_mutex_unlock_iothread();
+    }
 }
 
 void cpu_mips_irq_init_cpu(MIPSCPU *cpu)
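A minimal sketch of the conditional-BQL pattern introduced above, reduced to its essentials: the helper takes the iothread lock (BQL) only when the caller does not already hold it, so the same IRQ path works both from the main loop, where the BQL is held, and from an MTTCG vCPU thread, where it is not. The device_set_irq_level() name and its arguments are hypothetical; qemu_mutex_iothread_locked(), qemu_mutex_lock_iothread(), qemu_mutex_unlock_iothread(), cpu_interrupt() and cpu_reset_interrupt() are the QEMU APIs the patch itself relies on, and the header providing CPUState is assumed for this QEMU version.

/* Hypothetical helper; illustrates the take-BQL-only-if-needed pattern */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qom/cpu.h"            /* CPUState; header location assumed for this era */

static void device_set_irq_level(CPUState *cs, int level)
{
    bool locked = false;

    /* Take the BQL only if the caller does not already hold it */
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    if (level) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    /* Release the BQL only if this function took it */
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}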
diff --git a/hw/misc/mips_cpc.c b/hw/misc/mips_cpc.c
index 6d345745f6..712d8423a7 100644
--- a/hw/misc/mips_cpc.c
+++ b/hw/misc/mips_cpc.c
@@ -30,6 +30,14 @@ static inline uint64_t cpc_vp_run_mask(MIPSCPCState *cpc)
     return (1ULL << cpc->num_vp) - 1;
 }
 
+static void mips_cpu_reset_async_work(CPUState *cs, run_on_cpu_data data)
+{
+    MIPSCPCState *cpc = (MIPSCPCState *) data.host_ptr;
+
+    cpu_reset(cs);
+    cpc->vp_running |= 1ULL << cs->cpu_index;
+}
+
 static void cpc_run_vp(MIPSCPCState *cpc, uint64_t vp_run)
 {
     CPUState *cs = first_cpu;
@@ -37,8 +45,13 @@ static void cpc_run_vp(MIPSCPCState *cpc, uint64_t vp_run)
     CPU_FOREACH(cs) {
         uint64_t i = 1ULL << cs->cpu_index;
         if (i & vp_run & ~cpc->vp_running) {
-            cpu_reset(cs);
-            cpc->vp_running |= i;
+            /*
+             * To avoid racing with a CPU we are just kicking off,
+             * we do the final bit of preparation for the work in
+             * the target CPU's context.
+             */
+            async_safe_run_on_cpu(cs, mips_cpu_reset_async_work,
+                                  RUN_ON_CPU_HOST_PTR(cpc));
         }
     }
 }
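For the hw/misc/mips_cpc.c half, a minimal sketch of the same deferral idea in isolation: rather than resetting another vCPU directly from the device model (and racing with code that vCPU may be executing), the work is queued with async_safe_run_on_cpu() and runs later in the target CPU's own context while all vCPUs are quiescent. The reset_vp_work() and kick_all_vps() names are invented for illustration; async_safe_run_on_cpu(), RUN_ON_CPU_NULL, CPU_FOREACH() and cpu_reset() are real QEMU interfaces, and the CPUState header path is assumed for this QEMU version.

/* Illustrative only: defer per-CPU work to the target vCPU's context */
#include "qemu/osdep.h"
#include "qom/cpu.h"            /* CPUState, run_on_cpu_data; path assumed for this era */

static void reset_vp_work(CPUState *cs, run_on_cpu_data data)
{
    /* Runs in cs's own context with all vCPUs held at a safe point */
    cpu_reset(cs);
}

static void kick_all_vps(void)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        /* Only queues the work; it executes asynchronously, not here */
        async_safe_run_on_cpu(cs, reset_vp_work, RUN_ON_CPU_NULL);
    }
}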