author    Peter Maydell <peter.maydell@linaro.org>  2020-08-24 09:35:21 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2020-08-24 09:35:21 +0100
commit    dd8014e4e904e895435aae9f11a686f072762782 (patch)
tree      ea1f526128f3d88a92f90cf8833b3adf9c8ff828 /hw/intc
parent    8367a77c4d3f6e1e60890f5510304feb2c621611 (diff)
parent    3110f0ee19ccdb50adff3dfa1321039f69efddcd (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-5.2-20200818' into staging
ppc patch queue 2020-08-18

Here's my first pull request for qemu-5.2, which has quite a few
accumulated things. Highlights are:

 * Preliminary support for POWER10 (Power ISA 3.1) instruction emulation
 * Add documentation on the (very confusing) pseries NUMA configuration
 * Fix some bugs handling edge cases with XICS, XIVE and kernel_irqchip
 * Fix icount for a number of POWER registers
 * Many cleanups to error handling in XIVE code
 * Validate size of -prom-env data

# gpg: Signature made Tue 18 Aug 2020 05:18:36 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-5.2-20200818: (40 commits)
  spapr/xive: Use xive_source_esb_len()
  nvram: Exit QEMU if NVRAM cannot contain all -prom-env data
  spapr/xive: Simplify error handling of kvmppc_xive_cpu_synchronize_state()
  ppc/xive: Simplify error handling in xive_tctx_realize()
  spapr/xive: Simplify error handling in kvmppc_xive_connect()
  ppc/xive: Fix error handling in vmstate_xive_tctx_*() callbacks
  spapr/xive: Fix error handling in kvmppc_xive_post_load()
  spapr/kvm: Fix error handling in kvmppc_xive_pre_save()
  spapr/xive: Rework error handling of kvmppc_xive_set_source_config()
  spapr/xive: Rework error handling in kvmppc_xive_get_queues()
  spapr/xive: Rework error handling of kvmppc_xive_[gs]et_queue_config()
  spapr/xive: Rework error handling of kvmppc_xive_cpu_[gs]et_state()
  spapr/xive: Rework error handling of kvmppc_xive_mmap()
  spapr/xive: Rework error handling of kvmppc_xive_source_reset()
  spapr/xive: Rework error handling of kvmppc_xive_cpu_connect()
  spapr: Simplify error handling in spapr_phb_realize()
  spapr/xive: Convert KVM device fd checks to assert()
  ppc/xive: Introduce dedicated kvm_irqchip_in_kernel() wrappers
  ppc/xive: Rework setup of XiveSource::esb_mmio
  target/ppc: Integrate icount to purr, vtb, and tbu40
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'hw/intc')
-rw-r--r--  hw/intc/spapr_xive.c      |  47
-rw-r--r--  hw/intc/spapr_xive_kvm.c  | 257
-rw-r--r--  hw/intc/xive.c            |  57
3 files changed, 185 insertions, 176 deletions
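
A large part of this queue converts void XIVE helpers that reported failures only through a local Error * and error_propagate() into functions that return a negative errno and pass the caller's errp straight through, so callers can simply branch on the return value. The following sketch is illustrative only and is not part of the patch: it uses a minimal stand-in Error type, a simplified error_setg_errno(), and a hypothetical do_ioctl() helper (none of which are QEMU's real API) purely to contrast the old and new calling conventions.

/* Illustrative sketch only -- not from the patch.  Error, error_setg_errno()
 * and do_ioctl() are simplified stand-ins for QEMU's error API and a KVM
 * device ioctl, used to show the before/after calling convention. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Error { char msg[128]; } Error;

static void error_setg_errno(Error **errp, int err, const char *what)
{
    if (errp && !*errp) {
        *errp = malloc(sizeof(**errp));
        snprintf((*errp)->msg, sizeof((*errp)->msg), "%s: errno %d", what, err);
    }
}

static int do_ioctl(void) { return -ENOSPC; }   /* pretend the ioctl failed */

/* Old style: void return, errors only visible through *errp. */
static void old_style(Error **errp)
{
    int ret = do_ioctl();
    if (ret < 0) {
        error_setg_errno(errp, -ret, "XIVE: ioctl failed");
    }
}

/* New style: the negative errno is also returned, so callers can test it
 * directly instead of checking a local Error and propagating it. */
static int new_style(Error **errp)
{
    int ret = do_ioctl();
    if (ret < 0) {
        error_setg_errno(errp, -ret, "XIVE: ioctl failed");
        return ret;
    }
    return 0;
}

int main(void)
{
    Error *err1 = NULL, *err2 = NULL;

    old_style(&err1);                   /* caller must inspect err1 itself */
    if (err1) {
        fprintf(stderr, "old: %s\n", err1->msg);
    }

    if (new_style(&err2) < 0) {         /* caller just tests the return value */
        fprintf(stderr, "new: %s\n", err2 ? err2->msg : "unknown error");
    }

    free(err1);
    free(err2);
    return 0;
}
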
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 89c8cd9667..4bd0d606ba 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -148,12 +148,19 @@ static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
xive_end_queue_pic_print_info(end, 6, mon);
}
+/*
+ * kvm_irqchip_in_kernel() will cause the compiler to turn this
+ * into a nop if CONFIG_KVM isn't defined.
+ */
+#define spapr_xive_in_kernel(xive) \
+ (kvm_irqchip_in_kernel() && (xive)->fd != -1)
+
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
XiveSource *xsrc = &xive->source;
int i;
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
Error *local_err = NULL;
kvmppc_xive_synchronize_state(xive, &local_err);
@@ -329,7 +336,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
/* Set the mapping address of the END ESB pages after the source ESBs */
- xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
+ xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);
/*
* Allocate the routing tables
@@ -507,8 +514,10 @@ static const VMStateDescription vmstate_spapr_xive_eas = {
static int vmstate_spapr_xive_pre_save(void *opaque)
{
- if (kvm_irqchip_in_kernel()) {
- return kvmppc_xive_pre_save(SPAPR_XIVE(opaque));
+ SpaprXive *xive = SPAPR_XIVE(opaque);
+
+ if (spapr_xive_in_kernel(xive)) {
+ return kvmppc_xive_pre_save(xive);
}
return 0;
@@ -520,8 +529,10 @@ static int vmstate_spapr_xive_pre_save(void *opaque)
*/
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
{
- if (kvm_irqchip_in_kernel()) {
- return kvmppc_xive_post_load(SPAPR_XIVE(intc), version_id);
+ SpaprXive *xive = SPAPR_XIVE(intc);
+
+ if (spapr_xive_in_kernel(xive)) {
+ return kvmppc_xive_post_load(xive, version_id);
}
return 0;
@@ -564,7 +575,7 @@ static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
xive_source_irq_set_lsi(xsrc, lisn);
}
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
}
@@ -641,7 +652,7 @@ static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
SpaprXive *xive = SPAPR_XIVE(intc);
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
kvmppc_xive_source_set_irq(&xive->source, irq, val);
} else {
xive_source_set_irq(&xive->source, irq, val);
@@ -749,11 +760,16 @@ static void spapr_xive_deactivate(SpaprInterruptController *intc)
spapr_xive_mmio_set_enabled(xive, false);
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
kvmppc_xive_disconnect(intc);
}
}
+static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
+{
+ return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
+}
+
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -788,6 +804,7 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data)
sicc->post_load = spapr_xive_post_load;
xpc->match_nvt = spapr_xive_match_nvt;
+ xpc->in_kernel = spapr_xive_in_kernel_xptr;
}
static const TypeInfo spapr_xive_info = {
@@ -1058,7 +1075,7 @@ static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
}
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
Error *local_err = NULL;
kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
@@ -1379,7 +1396,7 @@ static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
*/
out:
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
Error *local_err = NULL;
kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
@@ -1480,7 +1497,7 @@ static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
args[2] = 0;
}
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
Error *local_err = NULL;
kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
@@ -1642,7 +1659,7 @@ static target_ulong h_int_esb(PowerPCCPU *cpu,
return H_P3;
}
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
flags & SPAPR_XIVE_ESB_STORE);
} else {
@@ -1717,7 +1734,7 @@ static target_ulong h_int_sync(PowerPCCPU *cpu,
* under KVM
*/
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
Error *local_err = NULL;
kvmppc_xive_sync_source(xive, lisn, &local_err);
@@ -1761,7 +1778,7 @@ static target_ulong h_int_reset(PowerPCCPU *cpu,
device_legacy_reset(DEVICE(xive));
- if (kvm_irqchip_in_kernel()) {
+ if (spapr_xive_in_kernel(xive)) {
Error *local_err = NULL;
kvmppc_xive_reset(xive, &local_err);
diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c
index edb7ee0e74..e8667ce5f6 100644
--- a/hw/intc/spapr_xive_kvm.c
+++ b/hw/intc/spapr_xive_kvm.c
@@ -73,54 +73,54 @@ static void kvm_cpu_disable_all(void)
* XIVE Thread Interrupt Management context (KVM)
*/
-void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
+int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
uint64_t state[2];
int ret;
- /* The KVM XIVE device is not in use yet */
- if (xive->fd == -1) {
- return;
- }
+ assert(xive->fd != -1);
/* word0 and word1 of the OS ring. */
state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);
ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
if (ret != 0) {
- error_setg_errno(errp, errno,
+ error_setg_errno(errp, -ret,
"XIVE: could not restore KVM state of CPU %ld",
kvm_arch_vcpu_id(tctx->cs));
+ return ret;
}
+
+ return 0;
}
-void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
+int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
uint64_t state[2] = { 0 };
int ret;
- /* The KVM XIVE device is not in use */
- if (xive->fd == -1) {
- return;
- }
+ assert(xive->fd != -1);
ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
if (ret != 0) {
- error_setg_errno(errp, errno,
+ error_setg_errno(errp, -ret,
"XIVE: could not capture KVM state of CPU %ld",
kvm_arch_vcpu_id(tctx->cs));
- return;
+ return ret;
}
/* word0 and word1 of the OS ring. */
*((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
+
+ return 0;
}
typedef struct {
XiveTCTX *tctx;
- Error *err;
+ Error **errp;
+ int ret;
} XiveCpuGetState;
static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
@@ -128,14 +128,14 @@ static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
{
XiveCpuGetState *s = arg.host_ptr;
- kvmppc_xive_cpu_get_state(s->tctx, &s->err);
+ s->ret = kvmppc_xive_cpu_get_state(s->tctx, s->errp);
}
-void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
+int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
XiveCpuGetState s = {
.tctx = tctx,
- .err = NULL,
+ .errp = errp,
};
/*
@@ -144,26 +144,21 @@ void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
RUN_ON_CPU_HOST_PTR(&s));
- if (s.err) {
- error_propagate(errp, s.err);
- return;
- }
+ return s.ret;
}
-void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
+int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
+ ERRP_GUARD();
SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
unsigned long vcpu_id;
int ret;
- /* The KVM XIVE device is not in use */
- if (xive->fd == -1) {
- return;
- }
+ assert(xive->fd != -1);
/* Check if CPU was hot unplugged and replugged. */
if (kvm_cpu_is_enabled(tctx->cs)) {
- return;
+ return 0;
}
vcpu_id = kvm_arch_vcpu_id(tctx->cs);
@@ -171,28 +166,26 @@ void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
vcpu_id, 0);
if (ret < 0) {
- Error *local_err = NULL;
-
- error_setg(&local_err,
- "XIVE: unable to connect CPU%ld to KVM device: %s",
- vcpu_id, strerror(errno));
- if (errno == ENOSPC) {
- error_append_hint(&local_err, "Try -smp maxcpus=N with N < %u\n",
+ error_setg_errno(errp, -ret,
+ "XIVE: unable to connect CPU%ld to KVM device",
+ vcpu_id);
+ if (ret == -ENOSPC) {
+ error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n",
MACHINE(qdev_get_machine())->smp.max_cpus);
}
- error_propagate(errp, local_err);
- return;
+ return ret;
}
kvm_cpu_enable(tctx->cs);
+ return 0;
}
/*
* XIVE Interrupt Source (KVM)
*/
-void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
- Error **errp)
+int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
+ Error **errp)
{
uint32_t end_idx;
uint32_t end_blk;
@@ -201,7 +194,6 @@ void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
bool masked;
uint32_t eisn;
uint64_t kvm_src;
- Error *local_err = NULL;
assert(xive_eas_is_valid(eas));
@@ -221,12 +213,8 @@ void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
KVM_XIVE_SOURCE_EISN_MASK;
- kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
- &kvm_src, true, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
+ return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
+ &kvm_src, true, errp);
}
void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
@@ -245,10 +233,7 @@ int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
uint64_t state = 0;
- /* The KVM XIVE device is not in use */
- if (xive->fd == -1) {
- return -ENODEV;
- }
+ assert(xive->fd != -1);
if (xive_source_irq_is_lsi(xsrc, srcno)) {
state |= KVM_XIVE_LEVEL_SENSITIVE;
@@ -261,24 +246,25 @@ int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
true, errp);
}
-static void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
+static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
int i;
for (i = 0; i < xsrc->nr_irqs; i++) {
- Error *local_err = NULL;
+ int ret;
if (!xive_eas_is_valid(&xive->eat[i])) {
continue;
}
- kvmppc_xive_source_reset_one(xsrc, i, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
+ ret = kvmppc_xive_source_reset_one(xsrc, i, errp);
+ if (ret < 0) {
+ return ret;
}
}
+
+ return 0;
}
/*
@@ -381,15 +367,15 @@ void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
/*
* sPAPR XIVE interrupt controller (KVM)
*/
-void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
- uint32_t end_idx, XiveEND *end,
- Error **errp)
+int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ Error **errp)
{
struct kvm_ppc_xive_eq kvm_eq = { 0 };
uint64_t kvm_eq_idx;
uint8_t priority;
uint32_t server;
- Error *local_err = NULL;
+ int ret;
assert(xive_end_is_valid(end));
@@ -401,11 +387,10 @@ void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
KVM_XIVE_EQ_SERVER_MASK;
- kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
- &kvm_eq, false, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
+ ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
+ &kvm_eq, false, errp);
+ if (ret < 0) {
+ return ret;
}
/*
@@ -415,17 +400,18 @@ void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
*/
end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
+
+ return 0;
}
-void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
- uint32_t end_idx, XiveEND *end,
- Error **errp)
+int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ Error **errp)
{
struct kvm_ppc_xive_eq kvm_eq = { 0 };
uint64_t kvm_eq_idx;
uint8_t priority;
uint32_t server;
- Error *local_err = NULL;
/*
* Build the KVM state from the local END structure.
@@ -463,12 +449,9 @@ void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
KVM_XIVE_EQ_SERVER_MASK;
- kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
- &kvm_eq, true, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
+ return
+ kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
+ &kvm_eq, true, errp);
}
void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
@@ -477,23 +460,24 @@ void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
NULL, true, errp);
}
-static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
+static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
- Error *local_err = NULL;
int i;
+ int ret;
for (i = 0; i < xive->nr_ends; i++) {
if (!xive_end_is_valid(&xive->endt[i])) {
continue;
}
- kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
- &xive->endt[i], &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
+ ret = kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
+ &xive->endt[i], errp);
+ if (ret < 0) {
+ return ret;
}
}
+
+ return 0;
}
/*
@@ -592,10 +576,7 @@ static void kvmppc_xive_change_state_handler(void *opaque, int running,
void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
- /* The KVM XIVE device is not in use */
- if (xive->fd == -1) {
- return;
- }
+ assert(xive->fd != -1);
/*
* When the VM is stopped, the sources are masked and the previous
@@ -621,19 +602,17 @@ void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
int kvmppc_xive_pre_save(SpaprXive *xive)
{
Error *local_err = NULL;
+ int ret;
- /* The KVM XIVE device is not in use */
- if (xive->fd == -1) {
- return 0;
- }
+ assert(xive->fd != -1);
/* EAT: there is no extra state to query from KVM */
/* ENDT */
- kvmppc_xive_get_queues(xive, &local_err);
- if (local_err) {
+ ret = kvmppc_xive_get_queues(xive, &local_err);
+ if (ret < 0) {
error_report_err(local_err);
- return -1;
+ return ret;
}
return 0;
@@ -650,6 +629,7 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
Error *local_err = NULL;
CPUState *cs;
int i;
+ int ret;
/* The KVM XIVE device should be in use */
assert(xive->fd != -1);
@@ -660,11 +640,10 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
continue;
}
- kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
- &xive->endt[i], &local_err);
- if (local_err) {
- error_report_err(local_err);
- return -1;
+ ret = kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
+ &xive->endt[i], &local_err);
+ if (ret < 0) {
+ goto fail;
}
}
@@ -679,16 +658,14 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
* previously set in KVM. Since we don't do that for all interrupts
* at reset time anymore, let's do it now.
*/
- kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
- if (local_err) {
- error_report_err(local_err);
- return -1;
+ ret = kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
+ if (ret < 0) {
+ goto fail;
}
- kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
- if (local_err) {
- error_report_err(local_err);
- return -1;
+ ret = kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
+ if (ret < 0) {
+ goto fail;
}
}
@@ -705,17 +682,21 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
- kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
- if (local_err) {
- error_report_err(local_err);
- return -1;
+ ret = kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
+ if (ret < 0) {
+ goto fail;
}
}
/* The source states will be restored when the machine starts running */
return 0;
+
+fail:
+ error_report_err(local_err);
+ return ret;
}
+/* Returns MAP_FAILED on error and sets errno */
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
Error **errp)
{
@@ -726,7 +707,6 @@ static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
pgoff << page_shift);
if (addr == MAP_FAILED) {
error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
- return NULL;
}
return addr;
@@ -741,10 +721,12 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
{
SpaprXive *xive = SPAPR_XIVE(intc);
XiveSource *xsrc = &xive->source;
- Error *local_err = NULL;
- size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
+ size_t esb_len = xive_source_esb_len(xsrc);
size_t tima_len = 4ull << TM_SHIFT;
CPUState *cs;
+ int fd;
+ void *addr;
+ int ret;
/*
* The KVM XIVE device already in use. This is the case when
@@ -760,18 +742,20 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
}
/* First, create the KVM XIVE device */
- xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
- if (xive->fd < 0) {
- error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device");
+ fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
+ if (fd < 0) {
+ error_setg_errno(errp, -fd, "XIVE: error creating KVM device");
return -1;
}
+ xive->fd = fd;
/* Tell KVM about the # of VCPUs we may have */
if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
KVM_DEV_XIVE_NR_SERVERS)) {
- if (kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
- KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
- &local_err)) {
+ ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
+ KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
+ errp);
+ if (ret < 0) {
goto fail;
}
}
@@ -779,14 +763,14 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
/*
* 1. Source ESB pages - KVM mapping
*/
- xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
- &local_err);
- if (local_err) {
+ addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, errp);
+ if (addr == MAP_FAILED) {
goto fail;
}
+ xsrc->esb_mmap = addr;
memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
- "xive.esb", esb_len, xsrc->esb_mmap);
+ "xive.esb-kvm", esb_len, xsrc->esb_mmap);
memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
&xsrc->esb_mmio_kvm, 1);
@@ -797,11 +781,12 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
/*
* 3. TIMA pages - KVM mapping
*/
- xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
- &local_err);
- if (local_err) {
+ addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, errp);
+ if (addr == MAP_FAILED) {
goto fail;
}
+ xive->tm_mmap = addr;
+
memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
"xive.tima", tima_len, xive->tm_mmap);
memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
@@ -814,15 +799,15 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
- kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
- if (local_err) {
+ ret = kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, errp);
+ if (ret < 0) {
goto fail;
}
}
/* Update the KVM sources */
- kvmppc_xive_source_reset(xsrc, &local_err);
- if (local_err) {
+ ret = kvmppc_xive_source_reset(xsrc, errp);
+ if (ret < 0) {
goto fail;
}
@@ -832,7 +817,6 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
return 0;
fail:
- error_propagate(errp, local_err);
kvmppc_xive_disconnect(intc);
return -1;
}
@@ -843,14 +827,11 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc)
XiveSource *xsrc;
size_t esb_len;
- /* The KVM XIVE device is not in use */
- if (!xive || xive->fd == -1) {
- return;
- }
+ assert(xive->fd != -1);
/* Clear the KVM mapping */
xsrc = &xive->source;
- esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
+ esb_len = xive_source_esb_len(xsrc);
if (xsrc->esb_mmap) {
memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
@@ -871,10 +852,8 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc)
* and removed from the list of devices of the VM. The VCPU
* presenters are also detached from the device.
*/
- if (xive->fd != -1) {
- close(xive->fd);
- xive->fd = -1;
- }
+ close(xive->fd);
+ xive->fd = -1;
kvm_kernel_irqchip = false;
kvm_msi_via_irqfd_allowed = false;
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 9a162431e0..489e6256ef 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -592,6 +592,17 @@ static const char * const xive_tctx_ring_names[] = {
"USER", "OS", "POOL", "PHYS",
};
+/*
+ * kvm_irqchip_in_kernel() will cause the compiler to turn this
+ * into a nop if CONFIG_KVM isn't defined.
+ */
+#define xive_in_kernel(xptr) \
+ (kvm_irqchip_in_kernel() && \
+ ({ \
+ XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); \
+ xpc->in_kernel ? xpc->in_kernel(xptr) : false; \
+ }))
+
void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
int cpu_index;
@@ -606,7 +617,7 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
- if (kvm_irqchip_in_kernel()) {
+ if (xive_in_kernel(tctx->xptr)) {
Error *local_err = NULL;
kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
@@ -651,7 +662,6 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp)
XiveTCTX *tctx = XIVE_TCTX(dev);
PowerPCCPU *cpu;
CPUPPCState *env;
- Error *local_err = NULL;
assert(tctx->cs);
assert(tctx->xptr);
@@ -671,10 +681,8 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp)
}
/* Connect the presenter to the VCPU (required for CPU hotplug) */
- if (kvm_irqchip_in_kernel()) {
- kvmppc_xive_cpu_connect(tctx, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (xive_in_kernel(tctx->xptr)) {
+ if (kvmppc_xive_cpu_connect(tctx, errp) < 0) {
return;
}
}
@@ -682,13 +690,15 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp)
static int vmstate_xive_tctx_pre_save(void *opaque)
{
+ XiveTCTX *tctx = XIVE_TCTX(opaque);
Error *local_err = NULL;
+ int ret;
- if (kvm_irqchip_in_kernel()) {
- kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err);
- if (local_err) {
+ if (xive_in_kernel(tctx->xptr)) {
+ ret = kvmppc_xive_cpu_get_state(tctx, &local_err);
+ if (ret < 0) {
error_report_err(local_err);
- return -1;
+ return ret;
}
}
@@ -697,17 +707,19 @@ static int vmstate_xive_tctx_pre_save(void *opaque)
static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
+ XiveTCTX *tctx = XIVE_TCTX(opaque);
Error *local_err = NULL;
+ int ret;
- if (kvm_irqchip_in_kernel()) {
+ if (xive_in_kernel(tctx->xptr)) {
/*
* Required for hotplugged CPU, for which the state comes
* after all states of the machine.
*/
- kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
- if (local_err) {
+ ret = kvmppc_xive_cpu_set_state(tctx, &local_err);
+ if (ret < 0) {
error_report_err(local_err);
- return -1;
+ return ret;
}
}
@@ -1128,6 +1140,7 @@ static void xive_source_reset(void *dev)
static void xive_source_realize(DeviceState *dev, Error **errp)
{
XiveSource *xsrc = XIVE_SOURCE(dev);
+ size_t esb_len = xive_source_esb_len(xsrc);
assert(xsrc->xive);
@@ -1147,11 +1160,11 @@ static void xive_source_realize(DeviceState *dev, Error **errp)
xsrc->status = g_malloc0(xsrc->nr_irqs);
xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);
- if (!kvm_irqchip_in_kernel()) {
- memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
- &xive_source_esb_ops, xsrc, "xive.esb",
- (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
- }
+ memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len);
+ memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc),
+ &xive_source_esb_ops, xsrc, "xive.esb-emulated",
+ esb_len);
+ memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated);
qemu_register_reset(xive_source_reset, dev);
}
@@ -1502,7 +1515,7 @@ static bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
/*
* Notification using the END ESe/ESn bit (Event State Buffer for
- * escalation and notification). Profide futher coalescing in the
+ * escalation and notification). Provide further coalescing in the
* Router.
*/
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
@@ -1581,7 +1594,7 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
/*
* Check the END ESn (Event State Buffer for notification) for
- * even futher coalescing in the Router
+ * even further coalescing in the Router
*/
if (!xive_end_is_notify(&end)) {
/* ESn[Q]=1 : end of notification */
@@ -1660,7 +1673,7 @@ do_escalation:
/*
* Check the END ESe (Event State Buffer for escalation) for even
- * futher coalescing in the Router
+ * further coalescing in the Router
*/
if (!xive_end_is_uncond_escalation(&end)) {
/* ESe[Q]=1 : end of notification */