Diffstat (limited to 'hw')
-rw-r--r--  hw/intc/spapr_xive.c      |  47
-rw-r--r--  hw/intc/spapr_xive_kvm.c  |   5
-rw-r--r--  hw/intc/trace-events      |  33
-rw-r--r--  hw/intc/xics_kvm.c        |   2
-rw-r--r--  hw/intc/xive.c            |  40
-rw-r--r--  hw/mips/addr.c            |  51
-rw-r--r--  hw/mips/boston.c          |   5
-rw-r--r--  hw/mips/cps.c             |   3
-rw-r--r--  hw/mips/malta.c           |  14
-rw-r--r--  hw/mips/meson.build       |   2
-rw-r--r--  hw/ppc/e500.c             |   1
-rw-r--r--  hw/ppc/ppc.c              |   6
-rw-r--r--  hw/ppc/spapr.c            | 186
-rw-r--r--  hw/ppc/spapr_drc.c        |   8
-rw-r--r--  hw/ppc/spapr_events.c     |  21
-rw-r--r--  hw/ppc/spapr_hcall.c      |   7
-rw-r--r--  hw/ppc/spapr_irq.c        |   2
-rw-r--r--  hw/ppc/spapr_nvdimm.c     |  11
-rw-r--r--  hw/ppc/spapr_pci.c        |  48
-rw-r--r--  hw/ppc/trace-events       |   2
20 files changed, 294 insertions, 200 deletions
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 1fa09f287a..caedd312d7 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -24,6 +24,7 @@
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
+#include "trace.h"
/*
* XIVE Virtualization Controller BAR and Thread Managment BAR that we
@@ -296,22 +297,16 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
XiveENDSource *end_xsrc = &xive->end_source;
Error *local_err = NULL;
+ /* Set by spapr_irq_init() */
+ g_assert(xive->nr_irqs);
+ g_assert(xive->nr_ends);
+
sxc->parent_realize(dev, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
- if (!xive->nr_irqs) {
- error_setg(errp, "Number of interrupt needs to be greater 0");
- return;
- }
-
- if (!xive->nr_ends) {
- error_setg(errp, "Number of interrupt needs to be greater 0");
- return;
- }
-
/*
* Initialize the internal sources, for IPIs and virtual devices.
*/
@@ -562,6 +557,8 @@ static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
assert(lisn < xive->nr_irqs);
+ trace_spapr_xive_claim_irq(lisn, lsi);
+
if (xive_eas_is_valid(&xive->eat[lisn])) {
error_setg(errp, "IRQ %d is not free", lisn);
return -EBUSY;
@@ -587,6 +584,8 @@ static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
SpaprXive *xive = SPAPR_XIVE(intc);
assert(lisn < xive->nr_irqs);
+ trace_spapr_xive_free_irq(lisn);
+
xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}
@@ -653,6 +652,8 @@ static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
SpaprXive *xive = SPAPR_XIVE(intc);
+ trace_spapr_xive_set_irq(irq, val);
+
if (spapr_xive_in_kernel(xive)) {
kvmppc_xive_source_set_irq(&xive->source, irq, val);
} else {
@@ -900,6 +901,8 @@ static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
target_ulong flags = args[0];
target_ulong lisn = args[1];
+ trace_spapr_xive_get_source_info(flags, lisn);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1015,6 +1018,8 @@ static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
uint8_t end_blk;
uint32_t end_idx;
+ trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1120,6 +1125,8 @@ static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
uint8_t nvt_blk;
uint32_t end_idx, nvt_idx;
+ trace_spapr_xive_get_source_config(flags, lisn);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1194,6 +1201,8 @@ static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
uint8_t end_blk;
uint32_t end_idx;
+ trace_spapr_xive_get_queue_info(flags, target, priority);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1281,6 +1290,8 @@ static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
uint8_t end_blk, nvt_blk;
uint32_t end_idx, nvt_idx;
+ trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1448,6 +1459,8 @@ static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
uint8_t end_blk;
uint32_t end_idx;
+ trace_spapr_xive_get_queue_config(flags, target, priority);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1541,6 +1554,10 @@ static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
target_ulong opcode,
target_ulong *args)
{
+ target_ulong flags = args[0];
+
+ trace_spapr_xive_set_os_reporting_line(flags);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1577,6 +1594,10 @@ static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
target_ulong opcode,
target_ulong *args)
{
+ target_ulong flags = args[0];
+
+ trace_spapr_xive_get_os_reporting_line(flags);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1629,6 +1650,8 @@ static target_ulong h_int_esb(PowerPCCPU *cpu,
hwaddr mmio_addr;
XiveSource *xsrc = &xive->source;
+ trace_spapr_xive_esb(flags, lisn, offset, data);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1698,6 +1721,8 @@ static target_ulong h_int_sync(PowerPCCPU *cpu,
target_ulong flags = args[0];
target_ulong lisn = args[1];
+ trace_spapr_xive_sync(flags, lisn);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
@@ -1763,6 +1788,8 @@ static target_ulong h_int_reset(PowerPCCPU *cpu,
SpaprXive *xive = spapr->xive;
target_ulong flags = args[0];
+ trace_spapr_xive_reset(flags);
+
if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
return H_FUNCTION;
}
diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c
index e8667ce5f6..acc8c3650c 100644
--- a/hw/intc/spapr_xive_kvm.c
+++ b/hw/intc/spapr_xive_kvm.c
@@ -20,6 +20,7 @@
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"
+#include "trace.h"
#include <sys/ioctl.h>
@@ -163,6 +164,8 @@ int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
vcpu_id = kvm_arch_vcpu_id(tctx->cs);
+ trace_kvm_xive_cpu_connect(vcpu_id);
+
ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
vcpu_id, 0);
if (ret < 0) {
@@ -308,6 +311,8 @@ uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
return xive_esb_rw(xsrc, srcno, offset, data, 1);
}
+ trace_kvm_xive_source_reset(srcno);
+
/*
* Special Load EOI handling for LSI sources. Q bit is never set
* and the interrupt should be re-triggered if the level is still
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 22782b3f08..8ed397a0d5 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -203,3 +203,36 @@ heathrow_set_irq(int num, int level) "set_irq: num=0x%02x level=%d"
# bcm2835_ic.c
bcm2835_ic_set_gpu_irq(int irq, int level) "GPU irq #%d level %d"
bcm2835_ic_set_cpu_irq(int irq, int level) "CPU irq #%d level %d"
+
+# spapr_xive.c
+spapr_xive_claim_irq(uint32_t lisn, bool lsi) "lisn=0x%x lsi=%d"
+spapr_xive_free_irq(uint32_t lisn) "lisn=0x%x"
+spapr_xive_set_irq(uint32_t lisn, uint32_t val) "lisn=0x%x val=%d"
+spapr_xive_get_source_info(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64
+spapr_xive_set_source_config(uint64_t flags, uint64_t lisn, uint64_t target, uint64_t priority, uint64_t eisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64" eisn=0x%"PRIx64
+spapr_xive_get_source_config(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64
+spapr_xive_get_queue_info(uint64_t flags, uint64_t target, uint64_t priority) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64
+spapr_xive_set_queue_config(uint64_t flags, uint64_t target, uint64_t priority, uint64_t qpage, uint64_t qsize) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64" qpage=0x%"PRIx64" qsize=0x%"PRIx64
+spapr_xive_get_queue_config(uint64_t flags, uint64_t target, uint64_t priority) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64
+spapr_xive_set_os_reporting_line(uint64_t flags) "flags=0x%"PRIx64
+spapr_xive_get_os_reporting_line(uint64_t flags) "flags=0x%"PRIx64
+spapr_xive_esb(uint64_t flags, uint64_t lisn, uint64_t offset, uint64_t data) "flags=0x%"PRIx64" lisn=0x%"PRIx64" offset=0x%"PRIx64" data=0x%"PRIx64
+spapr_xive_sync(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64
+spapr_xive_reset(uint64_t flags) "flags=0x%"PRIx64
+
+# spapr_xive_kvm.c
+kvm_xive_cpu_connect(uint32_t id) "connect CPU%d to KVM device"
+kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x"
+
+# xive.c
+xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK"
+xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !"
+xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x"
+xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
+xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
+xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x"
+xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x"
+xive_tctx_tm_write(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64
+xive_tctx_tm_read(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64
+xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x"
+xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
index 68bb1914b9..570d635bcc 100644
--- a/hw/intc/xics_kvm.c
+++ b/hw/intc/xics_kvm.c
@@ -484,7 +484,7 @@ void xics_kvm_disconnect(SpaprInterruptController *intc)
* support destruction of a KVM XICS device while the VM is running.
* Required to start a spapr machine with ic-mode=dual,kernel-irqchip=on.
*/
-bool xics_kvm_has_broken_disconnect(SpaprMachineState *spapr)
+bool xics_kvm_has_broken_disconnect(void)
{
int rc;
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 489e6256ef..fa8c3d8287 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -21,6 +21,7 @@
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
+#include "trace.h"
/*
* XIVE Thread Interrupt Management context
@@ -93,6 +94,10 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
/* Drop Exception bit */
regs[TM_NSR] &= ~mask;
+
+ trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ regs[TM_CPPR], regs[TM_NSR]);
}
return (nsr << 8) | regs[TM_CPPR];
@@ -113,12 +118,21 @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
default:
g_assert_not_reached();
}
+ trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ regs[TM_CPPR], regs[TM_NSR]);
qemu_irq_raise(xive_tctx_output(tctx, ring));
}
}
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
+ uint8_t *regs = &tctx->regs[ring];
+
+ trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ cppr, regs[TM_NSR]);
+
if (cppr > XIVE_PRIORITY_MAX) {
cppr = 0xff;
}
@@ -508,6 +522,8 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
{
const XiveTmOp *xto;
+ trace_xive_tctx_tm_write(offset, size, value);
+
/*
* TODO: check V bit in Q[0-3]W2
*/
@@ -545,6 +561,7 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
unsigned size)
{
const XiveTmOp *xto;
+ uint64_t ret;
/*
* TODO: check V bit in Q[0-3]W2
@@ -560,7 +577,8 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
"@%"HWADDR_PRIx"\n", offset);
return -1;
}
- return xto->read_handler(xptr, tctx, offset, size);
+ ret = xto->read_handler(xptr, tctx, offset, size);
+ goto out;
}
/*
@@ -568,13 +586,17 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
*/
xto = xive_tm_find_op(offset, size, false);
if (xto) {
- return xto->read_handler(xptr, tctx, offset, size);
+ ret = xto->read_handler(xptr, tctx, offset, size);
+ goto out;
}
/*
* Finish with raw access to the register values
*/
- return xive_tm_raw_read(tctx, offset, size);
+ ret = xive_tm_raw_read(tctx, offset, size);
+out:
+ trace_xive_tctx_tm_read(offset, size, ret);
+ return ret;
}
static char *xive_tctx_ring_print(uint8_t *ring)
@@ -1005,6 +1027,8 @@ static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
offset);
}
+ trace_xive_source_esb_read(addr, srcno, ret);
+
return ret;
}
@@ -1030,6 +1054,8 @@ static void xive_source_esb_write(void *opaque, hwaddr addr,
uint32_t srcno = addr >> xsrc->esb_shift;
bool notify = false;
+ trace_xive_source_esb_write(addr, srcno, value);
+
/* In a two pages ESB MMIO setting, trigger page only triggers */
if (xive_source_is_trigger_page(xsrc, addr)) {
notify = xive_source_esb_trigger(xsrc, srcno);
@@ -1507,6 +1533,7 @@ static bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
/* handle CPU exception delivery */
if (count) {
+ trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
xive_tctx_ipb_update(match.tctx, match.ring, priority_to_ipb(priority));
}
@@ -1558,6 +1585,7 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
}
if (!xive_end_is_valid(&end)) {
+ trace_xive_router_end_notify(end_blk, end_idx, end_data);
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
end_blk, end_idx);
return;
@@ -1683,6 +1711,10 @@ do_escalation:
}
}
+ trace_xive_router_end_escalate(end_blk, end_idx,
+ (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
+ (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
+ (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));
/*
* The END trigger becomes an Escalation trigger
*/
@@ -1796,6 +1828,8 @@ static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
end_blk = xive_router_get_block_id(xsrc->xrtr);
end_idx = addr >> (xsrc->esb_shift + 1);
+ trace_xive_end_source_read(end_blk, end_idx, addr);
+
if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
end_idx);
diff --git a/hw/mips/addr.c b/hw/mips/addr.c
deleted file mode 100644
index 2f138fe1ea..0000000000
--- a/hw/mips/addr.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * QEMU MIPS address translation support
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "hw/mips/cpudevs.h"
-
-static int mips_um_ksegs;
-
-uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr)
-{
- return addr & 0x1fffffffll;
-}
-
-uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr)
-{
- return addr | ~0x7fffffffll;
-}
-
-uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr)
-{
- return addr | 0x40000000ll;
-}
-
-bool mips_um_ksegs_enabled(void)
-{
- return mips_um_ksegs;
-}
-
-void mips_um_ksegs_enable(void)
-{
- mips_um_ksegs = 1;
-}
diff --git a/hw/mips/boston.c b/hw/mips/boston.c
index 3d40867dc4..c3b94c68e1 100644
--- a/hw/mips/boston.c
+++ b/hw/mips/boston.c
@@ -28,7 +28,6 @@
#include "hw/loader.h"
#include "hw/loader-fit.h"
#include "hw/mips/cps.h"
-#include "hw/mips/cpudevs.h"
#include "hw/pci-host/xilinx-pcie.h"
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
@@ -459,12 +458,12 @@ static void boston_mach_init(MachineState *machine)
s = BOSTON(dev);
s->mach = machine;
- if (!cpu_supports_cps_smp(machine->cpu_type)) {
+ if (!cpu_type_supports_cps_smp(machine->cpu_type)) {
error_report("Boston requires CPUs which support CPS");
exit(1);
}
- is_64b = cpu_supports_isa(machine->cpu_type, ISA_MIPS64);
+ is_64b = cpu_type_supports_isa(machine->cpu_type, ISA_MIPS64);
object_initialize_child(OBJECT(machine), "cps", &s->cps, TYPE_MIPS_CPS);
object_property_set_str(OBJECT(&s->cps), "cpu-type", machine->cpu_type,
diff --git a/hw/mips/cps.c b/hw/mips/cps.c
index 962b1b0b87..7a0d289efa 100644
--- a/hw/mips/cps.c
+++ b/hw/mips/cps.c
@@ -58,8 +58,7 @@ static void main_cpu_reset(void *opaque)
static bool cpu_mips_itu_supported(CPUMIPSState *env)
{
- bool is_mt = (env->CP0_Config5 & (1 << CP0C5_VP)) ||
- (env->CP0_Config3 & (1 << CP0C3_MT));
+ bool is_mt = (env->CP0_Config5 & (1 << CP0C5_VP)) || ase_mt_available(env);
return is_mt && !kvm_enabled();
}
diff --git a/hw/mips/malta.c b/hw/mips/malta.c
index 5c11eecec1..366f4fdfcd 100644
--- a/hw/mips/malta.c
+++ b/hw/mips/malta.c
@@ -24,6 +24,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/bitops.h"
#include "qemu-common.h"
#include "qemu/datadir.h"
#include "cpu.h"
@@ -1135,8 +1136,13 @@ static void malta_mips_config(MIPSCPU *cpu)
CPUMIPSState *env = &cpu->env;
CPUState *cs = CPU(cpu);
- env->mvp->CP0_MVPConf0 |= ((smp_cpus - 1) << CP0MVPC0_PVPE) |
- ((smp_cpus * cs->nr_threads - 1) << CP0MVPC0_PTC);
+ if (ase_mt_available(env)) {
+ env->mvp->CP0_MVPConf0 = deposit32(env->mvp->CP0_MVPConf0,
+ CP0MVPC0_PTC, 8,
+ smp_cpus * cs->nr_threads - 1);
+ env->mvp->CP0_MVPConf0 = deposit32(env->mvp->CP0_MVPConf0,
+ CP0MVPC0_PVPE, 4, smp_cpus - 1);
+ }
}
static void main_cpu_reset(void *opaque)
@@ -1205,7 +1211,7 @@ static void create_cps(MachineState *ms, MaltaState *s,
static void mips_create_cpu(MachineState *ms, MaltaState *s,
qemu_irq *cbus_irq, qemu_irq *i8259_irq)
{
- if ((ms->smp.cpus > 1) && cpu_supports_cps_smp(ms->cpu_type)) {
+ if ((ms->smp.cpus > 1) && cpu_type_supports_cps_smp(ms->cpu_type)) {
create_cps(ms, s, cbus_irq, i8259_irq);
} else {
create_cpu_without_cps(ms, s, cbus_irq, i8259_irq);
@@ -1309,7 +1315,7 @@ void mips_malta_init(MachineState *machine)
loaderparams.initrd_filename = initrd_filename;
kernel_entry = load_kernel();
- if (!cpu_supports_isa(machine->cpu_type, ISA_NANOMIPS32)) {
+ if (!cpu_type_supports_isa(machine->cpu_type, ISA_NANOMIPS32)) {
write_bootloader(memory_region_get_ram_ptr(bios),
bootloader_run_addr, kernel_entry);
} else {
diff --git a/hw/mips/meson.build b/hw/mips/meson.build
index bcdf96be69..77b4d8f365 100644
--- a/hw/mips/meson.build
+++ b/hw/mips/meson.build
@@ -1,5 +1,5 @@
mips_ss = ss.source_set()
-mips_ss.add(files('addr.c', 'mips_int.c'))
+mips_ss.add(files('mips_int.c'))
mips_ss.add(when: 'CONFIG_FULOONG', if_true: files('fuloong2e.c'))
mips_ss.add(when: 'CONFIG_JAZZ', if_true: files('jazz.c'))
mips_ss.add(when: 'CONFIG_MALTA', if_true: files('gt64xxx_pci.c', 'malta.c'))
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index 6a64eb31ab..072e558c2d 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -926,6 +926,7 @@ void ppce500_init(MachineState *machine)
ccsr_addr_space);
mpicdev = ppce500_init_mpic(pms, ccsr_addr_space, irqs);
+ g_free(irqs);
/* Serial */
if (serial_hd(0)) {
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index 4a11fb1640..5cbbff1f8d 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -120,6 +120,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level)
} else {
cpu_ppc_tb_stop(env);
}
+ break;
case PPC6xx_INPUT_INT:
/* Level sensitive - active high */
LOG_IRQ("%s: set the external IRQ state to %d\n",
@@ -1026,7 +1027,8 @@ static void timebase_save(PPCTimebase *tb)
*/
tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
- tb->runstate_paused = runstate_check(RUN_STATE_PAUSED);
+ tb->runstate_paused =
+ runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
}
static void timebase_load(PPCTimebase *tb)
@@ -1087,7 +1089,7 @@ static int timebase_pre_save(void *opaque)
{
PPCTimebase *tb = opaque;
- /* guest_timebase won't be overridden in case of paused guest */
+ /* guest_timebase won't be overridden in case of paused guest or savevm */
if (!tb->runstate_paused) {
timebase_save(tb);
}
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 6abb45d0ed..dee48a0043 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -3021,17 +3021,25 @@ static void spapr_machine_init(MachineState *machine)
qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);
}
+#define DEFAULT_KVM_TYPE "auto"
static int spapr_kvm_type(MachineState *machine, const char *vm_type)
{
- if (!vm_type) {
+ /*
+ * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
+ * accommodate the 'HV' and 'PR' formats that exist in the
+ * wild. The 'auto' mode is being introduced already as
+ * lower-case, thus we don't need to bother checking for
+ * "AUTO".
+ */
+ if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
return 0;
}
- if (!strcmp(vm_type, "HV")) {
+ if (!g_ascii_strcasecmp(vm_type, "hv")) {
return 1;
}
- if (!strcmp(vm_type, "PR")) {
+ if (!g_ascii_strcasecmp(vm_type, "pr")) {
return 2;
}
@@ -3270,10 +3278,15 @@ static void spapr_instance_init(Object *obj)
spapr->htab_fd = -1;
spapr->use_hotplug_event_source = true;
+ spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
object_property_add_str(obj, "kvm-type",
spapr_get_kvm_type, spapr_set_kvm_type);
object_property_set_description(obj, "kvm-type",
- "Specifies the KVM virtualization mode (HV, PR)");
+ "Specifies the KVM virtualization mode (auto,"
+ " hv, pr). Defaults to 'auto'. This mode will use"
+ " any available KVM module loaded in the host,"
+ " where kvm_hv takes precedence if both kvm_hv and"
+ " kvm_pr are loaded.");
object_property_add_bool(obj, "modern-hotplug-events",
spapr_get_modern_hotplug_events,
spapr_set_modern_hotplug_events);
@@ -3379,8 +3392,8 @@ int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
return 0;
}
-static bool spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
- bool dedicated_hp_event_source, Error **errp)
+static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
+ bool dedicated_hp_event_source)
{
SpaprDrc *drc;
uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
@@ -3393,15 +3406,12 @@ static bool spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
addr / SPAPR_MEMORY_BLOCK_SIZE);
g_assert(drc);
- if (!spapr_drc_attach(drc, dev, errp)) {
- while (addr > addr_start) {
- addr -= SPAPR_MEMORY_BLOCK_SIZE;
- drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
- addr / SPAPR_MEMORY_BLOCK_SIZE);
- spapr_drc_detach(drc);
- }
- return false;
- }
+ /*
+ * memory_device_get_free_addr() provided a range of free addresses
+ * that doesn't overlap with any existing mapping at pre-plug. The
+ * corresponding LMB DRCs are thus assumed to be all attachable.
+ */
+ spapr_drc_attach(drc, dev);
if (!hotplugged) {
spapr_drc_reset(drc);
}
@@ -3422,11 +3432,9 @@ static bool spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
nr_lmbs);
}
}
- return true;
}
-static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
+static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
PCDIMMDevice *dimm = PC_DIMM(dev);
@@ -3441,24 +3449,15 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
if (!is_nvdimm) {
addr = object_property_get_uint(OBJECT(dimm),
PC_DIMM_ADDR_PROP, &error_abort);
- if (!spapr_add_lmbs(dev, addr, size,
- spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT), errp)) {
- goto out_unplug;
- }
+ spapr_add_lmbs(dev, addr, size,
+ spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
} else {
slot = object_property_get_int(OBJECT(dimm),
PC_DIMM_SLOT_PROP, &error_abort);
/* We should have valid slot number at this point */
g_assert(slot >= 0);
- if (!spapr_add_nvdimm(dev, slot, errp)) {
- goto out_unplug;
- }
+ spapr_add_nvdimm(dev, slot);
}
-
- return;
-
-out_unplug:
- pc_dimm_unplug(dimm, MACHINE(ms));
}
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
@@ -3752,8 +3751,7 @@ int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
return 0;
}
-static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
+static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
MachineClass *mc = MACHINE_GET_CLASS(spapr);
@@ -3768,20 +3766,20 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
int i;
core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
- if (!core_slot) {
- error_setg(errp, "Unable to find CPU core with core-id: %d",
- cc->core_id);
- return;
- }
+ g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */
+
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
spapr_vcpu_id(spapr, cc->core_id));
g_assert(drc || !mc->has_hotpluggable_cpus);
if (drc) {
- if (!spapr_drc_attach(drc, dev, errp)) {
- return;
- }
+ /*
+ * spapr_core_pre_plug() already guarantees that this is a brand new
+ * core being plugged into a free slot. Nothing should already
+ * be attached to the corresponding DRC.
+ */
+ spapr_drc_attach(drc, dev);
if (hotplugged) {
/*
@@ -3796,24 +3794,22 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
core_slot->cpu = OBJECT(dev);
- if (smc->pre_2_10_has_unused_icps) {
- for (i = 0; i < cc->nr_threads; i++) {
- cs = CPU(core->threads[i]);
- pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
- }
- }
-
/*
* Set compatibility mode to match the boot CPU, which was either set
- * by the machine reset code or by CAS.
+ * by the machine reset code or by CAS. This really shouldn't fail at
+ * this point.
*/
if (hotplugged) {
for (i = 0; i < cc->nr_threads; i++) {
- if (ppc_set_compat(core->threads[i],
- POWERPC_CPU(first_cpu)->compat_pvr,
- errp) < 0) {
- return;
- }
+ ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
+ &error_abort);
+ }
+ }
+
+ if (smc->pre_2_10_has_unused_icps) {
+ for (i = 0; i < cc->nr_threads; i++) {
+ cs = CPU(core->threads[i]);
+ pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
}
}
}
@@ -3893,38 +3889,45 @@ int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
return 0;
}
-static void spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
const unsigned windows_supported = spapr_phb_windows_supported(sphb);
+ SpaprDrc *drc;
if (dev->hotplugged && !smc->dr_phb_enabled) {
error_setg(errp, "PHB hotplug not supported for this machine");
- return;
+ return false;
}
if (sphb->index == (uint32_t)-1) {
error_setg(errp, "\"index\" for PAPR PHB is mandatory");
- return;
+ return false;
+ }
+
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
+ if (drc && drc->dev) {
+ error_setg(errp, "PHB %d already attached", sphb->index);
+ return false;
}
/*
* This will check that sphb->index doesn't exceed the maximum number of
* PHBs for the current machine type.
*/
- smc->phb_placement(spapr, sphb->index,
- &sphb->buid, &sphb->io_win_addr,
- &sphb->mem_win_addr, &sphb->mem64_win_addr,
- windows_supported, sphb->dma_liobn,
- &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr,
- errp);
+ return
+ smc->phb_placement(spapr, sphb->index,
+ &sphb->buid, &sphb->io_win_addr,
+ &sphb->mem_win_addr, &sphb->mem64_win_addr,
+ windows_supported, sphb->dma_liobn,
+ &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr,
+ errp);
}
-static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
+static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
@@ -3940,9 +3943,8 @@ static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
/* hotplug hooks should check it's enabled before getting this far */
assert(drc);
- if (!spapr_drc_attach(drc, dev, errp)) {
- return;
- }
+ /* spapr_phb_pre_plug() already checked the DRC is attachable */
+ spapr_drc_attach(drc, dev);
if (hotplugged) {
spapr_hotplug_req_add_by_index(drc);
@@ -3979,17 +3981,28 @@ static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
}
}
-static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
- Error **errp)
+static
+bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
{
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
- SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
if (spapr->tpm_proxy != NULL) {
error_setg(errp, "Only one TPM proxy can be specified for this machine");
- return;
+ return false;
}
+ return true;
+}
+
+static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
+{
+ SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
+ SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
+
+ /* Already checked in spapr_tpm_proxy_pre_plug() */
+ g_assert(spapr->tpm_proxy == NULL);
+
spapr->tpm_proxy = tpm_proxy;
}
@@ -4006,13 +4019,13 @@ static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
- spapr_memory_plug(hotplug_dev, dev, errp);
+ spapr_memory_plug(hotplug_dev, dev);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
- spapr_core_plug(hotplug_dev, dev, errp);
+ spapr_core_plug(hotplug_dev, dev);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
- spapr_phb_plug(hotplug_dev, dev, errp);
+ spapr_phb_plug(hotplug_dev, dev);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
- spapr_tpm_proxy_plug(hotplug_dev, dev, errp);
+ spapr_tpm_proxy_plug(hotplug_dev, dev);
}
}
@@ -4075,6 +4088,8 @@ static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
spapr_core_pre_plug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
spapr_phb_pre_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
+ spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
}
}
@@ -4158,7 +4173,7 @@ static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
return machine->possible_cpus;
}
-static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
+static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns,
@@ -4196,7 +4211,7 @@ static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
if (index >= SPAPR_MAX_PHBS) {
error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
SPAPR_MAX_PHBS - 1);
- return;
+ return false;
}
*buid = base_buid + index;
@@ -4210,6 +4225,7 @@ static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
*nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
*nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
+ return true;
}
static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
@@ -4600,18 +4616,21 @@ DEFINE_SPAPR_MACHINE(4_1, "4.1", false);
/*
* pseries-4.0
*/
-static void phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
+static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns,
hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
- spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns,
- nv2gpa, nv2atsd, errp);
+ if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma,
+ liobns, nv2gpa, nv2atsd, errp)) {
+ return false;
+ }
+
*nv2gpa = 0;
*nv2atsd = 0;
+ return true;
}
-
static void spapr_machine_4_0_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
@@ -4771,7 +4790,7 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
* pseries-2.7
*/
-static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
+static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns,
@@ -4803,7 +4822,7 @@ static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
if (index > max_index) {
error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
max_index);
- return;
+ return false;
}
*buid = base_buid + index;
@@ -4822,6 +4841,7 @@ static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
*nv2gpa = 0;
*nv2atsd = 0;
+ return true;
}
static void spapr_machine_2_7_class_options(MachineClass *mc)
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index 77718cde1f..f991cf89a0 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -369,14 +369,11 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
} while (fdt_depth != 0);
}
-bool spapr_drc_attach(SpaprDrc *drc, DeviceState *d, Error **errp)
+void spapr_drc_attach(SpaprDrc *drc, DeviceState *d)
{
trace_spapr_drc_attach(spapr_drc_index(drc));
- if (drc->dev) {
- error_setg(errp, "an attached device is still awaiting release");
- return false;
- }
+ g_assert(!drc->dev);
g_assert((drc->state == SPAPR_DRC_STATE_LOGICAL_UNUSABLE)
|| (drc->state == SPAPR_DRC_STATE_PHYSICAL_POWERON));
@@ -386,7 +383,6 @@ bool spapr_drc_attach(SpaprDrc *drc, DeviceState *d, Error **errp)
object_get_typename(OBJECT(drc->dev)),
(Object **)(&drc->dev),
NULL, 0);
- return true;
}
static void spapr_drc_release(SpaprDrc *drc)
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index 1add53547e..3f37b49fd8 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -480,9 +480,8 @@ static SpaprEventLogEntry *rtas_event_log_dequeue(SpaprMachineState *spapr,
return entry;
}
-static bool rtas_event_log_contains(uint32_t event_mask)
+static bool rtas_event_log_contains(SpaprMachineState *spapr, uint32_t event_mask)
{
- SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
SpaprEventLogEntry *entry = NULL;
QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
@@ -509,10 +508,10 @@ static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
}
-static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
+static void spapr_init_maina(SpaprMachineState *spapr,
+ struct rtas_event_log_v6_maina *maina,
int section_count)
{
- SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
struct tm tm;
int year;
@@ -560,7 +559,7 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
entry->extended_length = sizeof(*new_epow);
spapr_init_v6hdr(v6hdr);
- spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);
+ spapr_init_maina(spapr, maina, 3 /* Main-A, Main-B and EPOW */);
mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
@@ -613,7 +612,7 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
entry->extended_length = sizeof(*new_hp);
spapr_init_v6hdr(v6hdr);
- spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);
+ spapr_init_maina(spapr, maina, 3 /* Main-A, Main-B, HP */);
mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
@@ -808,9 +807,9 @@ static uint32_t spapr_mce_get_elog_type(PowerPCCPU *cpu, bool recovered,
return summary;
}
-static void spapr_mce_dispatch_elog(PowerPCCPU *cpu, bool recovered)
+static void spapr_mce_dispatch_elog(SpaprMachineState *spapr, PowerPCCPU *cpu,
+ bool recovered)
{
- SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
uint64_t rtas_addr;
@@ -927,7 +926,7 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
warn_report("Received a fwnmi while migration was in progress");
}
- spapr_mce_dispatch_elog(cpu, recovered);
+ spapr_mce_dispatch_elog(spapr, cpu, recovered);
}
static void check_exception(PowerPCCPU *cpu, SpaprMachineState *spapr,
@@ -980,7 +979,7 @@ static void check_exception(PowerPCCPU *cpu, SpaprMachineState *spapr,
* interrupts.
*/
for (i = 0; i < EVENT_CLASS_MAX; i++) {
- if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
+ if (rtas_event_log_contains(spapr, EVENT_CLASS_MASK(i))) {
const SpaprEventSource *source =
spapr_event_sources_get_source(spapr->event_sources, i);
@@ -1007,7 +1006,7 @@ static void event_scan(PowerPCCPU *cpu, SpaprMachineState *spapr,
}
for (i = 0; i < EVENT_CLASS_MAX; i++) {
- if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
+ if (rtas_event_log_contains(spapr, EVENT_CLASS_MASK(i))) {
const SpaprEventSource *source =
spapr_event_sources_get_source(spapr->event_sources, i);
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 1d8e8e6a88..c0ea0bd579 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -1351,6 +1351,7 @@ static target_ulong h_logical_dcbf(PowerPCCPU *cpu, SpaprMachineState *spapr,
}
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
target_ulong mflags,
target_ulong value1,
target_ulong value2)
@@ -1365,12 +1366,12 @@ static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
switch (mflags) {
case H_SET_MODE_ENDIAN_BIG:
spapr_set_all_lpcrs(0, LPCR_ILE);
- spapr_pci_switch_vga(true);
+ spapr_pci_switch_vga(spapr, true);
return H_SUCCESS;
case H_SET_MODE_ENDIAN_LITTLE:
spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE);
- spapr_pci_switch_vga(false);
+ spapr_pci_switch_vga(spapr, false);
return H_SUCCESS;
}
@@ -1411,7 +1412,7 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
switch (resource) {
case H_SET_MODE_RESOURCE_LE:
- ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
+ ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]);
break;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c
index f59960339e..a0d1e1298e 100644
--- a/hw/ppc/spapr_irq.c
+++ b/hw/ppc/spapr_irq.c
@@ -186,7 +186,7 @@ static int spapr_irq_check(SpaprMachineState *spapr, Error **errp)
if (kvm_enabled() &&
spapr->irq == &spapr_irq_dual &&
kvm_kernel_irqchip_required() &&
- xics_kvm_has_broken_disconnect(spapr)) {
+ xics_kvm_has_broken_disconnect()) {
error_setg(errp,
"KVM is incompatible with ic-mode=dual,kernel-irqchip=on");
error_append_hint(errp,
diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c
index a833a63b5e..73ee006541 100644
--- a/hw/ppc/spapr_nvdimm.c
+++ b/hw/ppc/spapr_nvdimm.c
@@ -89,7 +89,7 @@ bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
}
-bool spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp)
+void spapr_add_nvdimm(DeviceState *dev, uint64_t slot)
{
SpaprDrc *drc;
bool hotplugged = spapr_drc_hotplugged(dev);
@@ -97,14 +97,15 @@ bool spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp)
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot);
g_assert(drc);
- if (!spapr_drc_attach(drc, dev, errp)) {
- return false;
- }
+ /*
+ * pc_dimm_get_free_slot() provided a free slot at pre-plug. The
+ * corresponding DRC is thus assumed to be attachable.
+ */
+ spapr_drc_attach(drc, dev);
if (hotplugged) {
spapr_hotplug_req_add_by_index(drc);
}
- return true;
}
static int spapr_dt_nvdimm(SpaprMachineState *spapr, void *fdt,
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 88ce87f130..76d7c91e9c 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -747,7 +747,7 @@ static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
static void spapr_msi_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
- SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ SpaprMachineState *spapr = opaque;
uint32_t irq = data;
trace_spapr_pci_msi_write(addr, data, irq);
@@ -1532,8 +1532,8 @@ static bool bridge_has_valid_chassis_nr(Object *bridge, Error **errp)
return true;
}
-static void spapr_pci_plug(HotplugHandler *plug_handler,
- DeviceState *plugged_dev, Error **errp)
+static void spapr_pci_pre_plug(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
{
SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
@@ -1542,9 +1542,6 @@ static void spapr_pci_plug(HotplugHandler *plug_handler,
PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
uint32_t slotnr = PCI_SLOT(pdev->devfn);
- /* if DR is disabled we don't need to do anything in the case of
- * hotplug or coldplug callbacks
- */
if (!phb->dr_enabled) {
/* if this is a hotplug operation initiated by the user
* we need to let them know it's not enabled
@@ -1552,17 +1549,14 @@ static void spapr_pci_plug(HotplugHandler *plug_handler,
if (plugged_dev->hotplugged) {
error_setg(errp, QERR_BUS_NO_HOTPLUG,
object_get_typename(OBJECT(phb)));
+ return;
}
- return;
}
- g_assert(drc);
-
if (pc->is_bridge) {
if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) {
return;
}
- spapr_pci_bridge_plug(phb, PCI_BRIDGE(plugged_dev));
}
/* Following the QEMU convention used for PCIe multifunction
@@ -1574,13 +1568,41 @@ static void spapr_pci_plug(HotplugHandler *plug_handler,
error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
" additional functions can no longer be exposed to guest.",
slotnr, bus->devices[PCI_DEVFN(slotnr, 0)]->name);
+ }
+
+ if (drc && drc->dev) {
+ error_setg(errp, "PCI: slot %d already occupied by %s", slotnr,
+ pci_get_function_0(PCI_DEVICE(drc->dev))->name);
return;
}
+}
+
+static void spapr_pci_plug(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
+{
+ SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
+ PCIDevice *pdev = PCI_DEVICE(plugged_dev);
+ PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev);
+ SpaprDrc *drc = drc_from_dev(phb, pdev);
+ uint32_t slotnr = PCI_SLOT(pdev->devfn);
- if (!spapr_drc_attach(drc, DEVICE(pdev), errp)) {
+ /*
+ * If DR is disabled we don't need to do anything in the case of
+ * hotplug or coldplug callbacks.
+ */
+ if (!phb->dr_enabled) {
return;
}
+ g_assert(drc);
+
+ if (pc->is_bridge) {
+ spapr_pci_bridge_plug(phb, PCI_BRIDGE(plugged_dev));
+ }
+
+ /* spapr_pci_pre_plug() already checked the DRC is attachable */
+ spapr_drc_attach(drc, DEVICE(pdev));
+
/* If this is function 0, signal hotplug for all the device functions.
* Otherwise defer sending the hotplug event.
*/
@@ -2223,6 +2245,7 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
/* Supported by TYPE_SPAPR_MACHINE */
dc->user_creatable = true;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ hp->pre_plug = spapr_pci_pre_plug;
hp->plug = spapr_pci_plug;
hp->unplug = spapr_pci_unplug;
hp->unplug_request = spapr_pci_unplug_request;
@@ -2470,9 +2493,8 @@ static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
return 0;
}
-void spapr_pci_switch_vga(bool big_endian)
+void spapr_pci_switch_vga(SpaprMachineState *spapr, bool big_endian)
{
- SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
SpaprPhbState *sphb;
/*
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index dcc06d49b5..6d8d095aa2 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -19,7 +19,7 @@ spapr_update_dt_failed_size(unsigned cbold, unsigned cbnew, unsigned magic) "Old
spapr_update_dt_failed_check(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x"
# spapr_tpm_proxy.c
-spapr_h_tpm_comm(const char *device_path, uint64_t operation) "tpm_device_path=%s operation=0x%"PRIu64
+spapr_h_tpm_comm(const char *device_path, uint64_t operation) "tpm_device_path=%s operation=0x%"PRIx64
spapr_tpm_execute(uint64_t data_in, uint64_t data_in_sz, uint64_t data_out, uint64_t data_out_sz) "data_in=0x%"PRIx64", data_in_sz=%"PRIu64", data_out=0x%"PRIx64", data_out_sz=%"PRIu64
# spapr_iommu.c