Diffstat (limited to 'hw')
-rw-r--r--  hw/Kconfig | 1
-rw-r--r--  hw/acpi/ich9.c | 10
-rw-r--r--  hw/acpi/ich9_tco.c | 2
-rw-r--r--  hw/arm/sbsa-ref.c | 2
-rw-r--r--  hw/audio/ac97.c | 45
-rw-r--r--  hw/audio/ac97.h | 65
-rw-r--r--  hw/audio/cs4231a.c | 5
-rw-r--r--  hw/audio/es1370.c | 10
-rw-r--r--  hw/audio/gus.c | 5
-rw-r--r--  hw/audio/hda-codec.c | 7
-rw-r--r--  hw/audio/sb16.c | 7
-rw-r--r--  hw/block/fdc-isa.c | 5
-rw-r--r--  hw/core/ptimer.c | 2
-rw-r--r--  hw/core/qdev.c | 2
-rw-r--r--  hw/display/sm501.c | 127
-rw-r--r--  hw/dma/i82374.c | 2
-rw-r--r--  hw/hppa/machine.c | 2
-rw-r--r--  hw/i2c/smbus_ich9.c | 39
-rw-r--r--  hw/i386/Kconfig | 5
-rw-r--r--  hw/i386/acpi-build.c | 3
-rw-r--r--  hw/i386/kvm/ioapic.c | 3
-rw-r--r--  hw/i386/kvm/meson.build | 13
-rw-r--r--  hw/i386/kvm/trace-events | 5
-rw-r--r--  hw/i386/kvm/trace.h | 1
-rw-r--r--  hw/i386/kvm/xen-stubs.c | 44
-rw-r--r--  hw/i386/kvm/xen_evtchn.c | 2341
-rw-r--r--  hw/i386/kvm/xen_evtchn.h | 88
-rw-r--r--  hw/i386/kvm/xen_gnttab.c | 232
-rw-r--r--  hw/i386/kvm/xen_gnttab.h | 25
-rw-r--r--  hw/i386/kvm/xen_overlay.c | 272
-rw-r--r--  hw/i386/kvm/xen_overlay.h | 26
-rw-r--r--  hw/i386/kvm/xen_xenstore.c | 500
-rw-r--r--  hw/i386/kvm/xen_xenstore.h | 20
-rw-r--r--  hw/i386/microvm.c | 30
-rw-r--r--  hw/i386/pc.c | 109
-rw-r--r--  hw/i386/pc_piix.c | 3
-rw-r--r--  hw/i386/pc_q35.c | 34
-rw-r--r--  hw/i386/x86.c | 26
-rw-r--r--  hw/i386/xen/meson.build | 5
-rw-r--r--  hw/i386/xen/xen-hvm.c | 8
-rw-r--r--  hw/i386/xen/xen_platform.c | 58
-rw-r--r--  hw/ide/ahci.c | 13
-rw-r--r--  hw/ide/atapi.c | 13
-rw-r--r--  hw/ide/cmd646.c | 4
-rw-r--r--  hw/ide/core.c | 80
-rw-r--r--  hw/ide/ich.c | 1
-rw-r--r--  hw/ide/ioport.c | 10
-rw-r--r--  hw/ide/isa.c | 22
-rw-r--r--  hw/ide/macio.c | 15
-rw-r--r--  hw/ide/microdrive.c | 9
-rw-r--r--  hw/ide/mmio.c | 37
-rw-r--r--  hw/ide/pci.c | 11
-rw-r--r--  hw/ide/piix.c | 48
-rw-r--r--  hw/ide/qdev.c | 2
-rw-r--r--  hw/ide/sii3112.c | 4
-rw-r--r--  hw/ide/trace-events | 3
-rw-r--r--  hw/ide/via.c | 16
-rw-r--r--  hw/intc/apic.c | 2
-rw-r--r--  hw/intc/i8259.c | 4
-rw-r--r--  hw/intc/ioapic.c | 4
-rw-r--r--  hw/intc/ioapic_common.c | 4
-rw-r--r--  hw/intc/ioapic_internal.h | 118
-rw-r--r--  hw/isa/i82378.c | 19
-rw-r--r--  hw/isa/isa-bus.c | 32
-rw-r--r--  hw/isa/lpc_ich9.c | 36
-rw-r--r--  hw/isa/piix4.c | 4
-rw-r--r--  hw/isa/vt82c686.c | 18
-rw-r--r--  hw/mips/jazz.c | 2
-rw-r--r--  hw/misc/macio/gpio.c | 1
-rw-r--r--  hw/nubus/nubus-device.c | 1
-rw-r--r--  hw/pci-bridge/i82801b11.c | 2
-rw-r--r--  hw/pci/msi.c | 11
-rw-r--r--  hw/pci/msix.c | 9
-rw-r--r--  hw/pci/pci.c | 19
-rw-r--r--  hw/ppc/pnv_lpc.c | 2
-rw-r--r--  hw/ppc/prep.c | 11
-rw-r--r--  hw/ppc/sam460ex.c | 4
-rw-r--r--  hw/rtc/m48t59-isa.c | 2
-rw-r--r--  hw/rtc/mc146818rtc.c | 128
-rw-r--r--  hw/sh4/r2d.c | 4
-rw-r--r--  hw/sparc64/sun4u.c | 13
-rw-r--r--  hw/timer/hpet.c | 1
-rw-r--r--  hw/usb/dev-smartcard-reader.c | 7
-rw-r--r--  hw/usb/hcd-ohci.c | 442
-rw-r--r--  hw/usb/hcd-ohci.h | 11
-rw-r--r--  hw/usb/hcd-uhci.c | 7
-rw-r--r--  hw/usb/hcd-uhci.h | 2
-rw-r--r--  hw/usb/hcd-xhci-nec.c | 8
-rw-r--r--  hw/usb/trace-events | 4
-rw-r--r--  hw/usb/u2f.h | 16
-rw-r--r--  hw/xen/Kconfig | 3
-rw-r--r--  hw/xen/xen-legacy-backend.c | 56
-rw-r--r--  hw/xenpv/xen_machine_pv.c | 6
93 files changed, 4688 insertions, 802 deletions
diff --git a/hw/Kconfig b/hw/Kconfig
index 38233bbb0f..ba62ff6417 100644
--- a/hw/Kconfig
+++ b/hw/Kconfig
@@ -41,6 +41,7 @@ source tpm/Kconfig
source usb/Kconfig
source virtio/Kconfig
source vfio/Kconfig
+source xen/Kconfig
source watchdog/Kconfig
# arch Kconfig
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index a93c470e9d..d23bfcaa6b 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -36,7 +36,7 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/ich9_tco.h"
-#include "hw/i386/ich9.h"
+#include "hw/southbridge/ich9.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
@@ -291,9 +291,7 @@ static void pm_powerdown_req(Notifier *n, void *opaque)
acpi_pm1_evt_power_down(&pm->acpi_regs);
}
-void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
- bool smm_enabled,
- qemu_irq sci_irq)
+void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq)
{
memory_region_init(&pm->io, OBJECT(lpc_pci), "ich9-pm", ICH9_PMIO_SIZE);
memory_region_set_enabled(&pm->io, false);
@@ -303,7 +301,7 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
acpi_pm_tmr_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io);
acpi_pm1_evt_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io);
acpi_pm1_cnt_init(&pm->acpi_regs, &pm->io, pm->disable_s3, pm->disable_s4,
- pm->s4_val, !pm->smm_compat && !smm_enabled);
+ pm->s4_val, !pm->smm_compat && !pm->smm_enabled);
acpi_gpe_init(&pm->acpi_regs, ICH9_PMIO_GPE0_LEN);
memory_region_init_io(&pm->io_gpe, OBJECT(lpc_pci), &ich9_gpe_ops, pm,
@@ -314,8 +312,6 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
"acpi-smi", 8);
memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi);
- pm->smm_enabled = smm_enabled;
-
if (pm->enable_tco) {
acpi_pm_tco_init(&pm->tco_regs, &pm->io);
}
diff --git a/hw/acpi/ich9_tco.c b/hw/acpi/ich9_tco.c
index fbf97f81f4..1540f4fd46 100644
--- a/hw/acpi/ich9_tco.c
+++ b/hw/acpi/ich9_tco.c
@@ -9,7 +9,7 @@
#include "qemu/osdep.h"
#include "sysemu/watchdog.h"
-#include "hw/i386/ich9.h"
+#include "hw/southbridge/ich9.h"
#include "migration/vmstate.h"
#include "hw/acpi/ich9_tco.h"
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index f778cb6d09..0b93558dde 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -554,7 +554,7 @@ static void create_ahci(const SBSAMachineState *sms)
if (hd[i] == NULL) {
continue;
}
- ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
+ ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]);
}
}
diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c
index 364cdfa733..c2a5ce062a 100644
--- a/hw/audio/ac97.c
+++ b/hw/audio/ac97.c
@@ -26,43 +26,7 @@
#include "qemu/module.h"
#include "sysemu/dma.h"
#include "qom/object.h"
-
-enum {
- AC97_Reset = 0x00,
- AC97_Master_Volume_Mute = 0x02,
- AC97_Headphone_Volume_Mute = 0x04,
- AC97_Master_Volume_Mono_Mute = 0x06,
- AC97_Master_Tone_RL = 0x08,
- AC97_PC_BEEP_Volume_Mute = 0x0A,
- AC97_Phone_Volume_Mute = 0x0C,
- AC97_Mic_Volume_Mute = 0x0E,
- AC97_Line_In_Volume_Mute = 0x10,
- AC97_CD_Volume_Mute = 0x12,
- AC97_Video_Volume_Mute = 0x14,
- AC97_Aux_Volume_Mute = 0x16,
- AC97_PCM_Out_Volume_Mute = 0x18,
- AC97_Record_Select = 0x1A,
- AC97_Record_Gain_Mute = 0x1C,
- AC97_Record_Gain_Mic_Mute = 0x1E,
- AC97_General_Purpose = 0x20,
- AC97_3D_Control = 0x22,
- AC97_AC_97_RESERVED = 0x24,
- AC97_Powerdown_Ctrl_Stat = 0x26,
- AC97_Extended_Audio_ID = 0x28,
- AC97_Extended_Audio_Ctrl_Stat = 0x2A,
- AC97_PCM_Front_DAC_Rate = 0x2C,
- AC97_PCM_Surround_DAC_Rate = 0x2E,
- AC97_PCM_LFE_DAC_Rate = 0x30,
- AC97_PCM_LR_ADC_Rate = 0x32,
- AC97_MIC_ADC_Rate = 0x34,
- AC97_6Ch_Vol_C_LFE_Mute = 0x36,
- AC97_6Ch_Vol_L_R_Surround_Mute = 0x38,
- AC97_Vendor_Reserved = 0x58,
- AC97_Sigmatel_Analog = 0x6c, /* We emulate a Sigmatel codec */
- AC97_Sigmatel_Dac2Invert = 0x6e, /* We emulate a Sigmatel codec */
- AC97_Vendor_ID1 = 0x7c,
- AC97_Vendor_ID2 = 0x7e
-};
+#include "ac97.h"
#define SOFT_VOLUME
#define SR_FIFOE 16 /* rwc */
@@ -121,11 +85,6 @@ enum {
#define BD_IOC (1 << 31)
#define BD_BUP (1 << 30)
-#define EACS_VRA 1
-#define EACS_VRM 8
-
-#define MUTE_SHIFT 15
-
#define TYPE_AC97 "AC97"
OBJECT_DECLARE_SIMPLE_TYPE(AC97LinkState, AC97)
@@ -1295,7 +1254,7 @@ static const MemoryRegionOps ac97_io_nabm_ops = {
static void ac97_on_reset(DeviceState *dev)
{
- AC97LinkState *s = container_of(dev, AC97LinkState, dev.qdev);
+ AC97LinkState *s = AC97(dev);
reset_bm_regs(s, &s->bm_regs[0]);
reset_bm_regs(s, &s->bm_regs[1]);
diff --git a/hw/audio/ac97.h b/hw/audio/ac97.h
new file mode 100644
index 0000000000..0358b56ff4
--- /dev/null
+++ b/hw/audio/ac97.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2006 InnoTek Systemberatung GmbH
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License as published by the Free Software Foundation,
+ * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
+ * distribution. VirtualBox OSE is distributed in the hope that it will
+ * be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * If you received this file as part of a commercial VirtualBox
+ * distribution, then only the terms of your commercial VirtualBox
+ * license agreement apply instead of the previous paragraph.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#ifndef AC97_H
+#define AC97_H
+
+enum {
+ AC97_Reset = 0x00,
+ AC97_Master_Volume_Mute = 0x02,
+ AC97_Headphone_Volume_Mute = 0x04,
+ AC97_Master_Volume_Mono_Mute = 0x06,
+ AC97_Master_Tone_RL = 0x08,
+ AC97_PC_BEEP_Volume_Mute = 0x0A,
+ AC97_Phone_Volume_Mute = 0x0C,
+ AC97_Mic_Volume_Mute = 0x0E,
+ AC97_Line_In_Volume_Mute = 0x10,
+ AC97_CD_Volume_Mute = 0x12,
+ AC97_Video_Volume_Mute = 0x14,
+ AC97_Aux_Volume_Mute = 0x16,
+ AC97_PCM_Out_Volume_Mute = 0x18,
+ AC97_Record_Select = 0x1A,
+ AC97_Record_Gain_Mute = 0x1C,
+ AC97_Record_Gain_Mic_Mute = 0x1E,
+ AC97_General_Purpose = 0x20,
+ AC97_3D_Control = 0x22,
+ AC97_AC_97_RESERVED = 0x24,
+ AC97_Powerdown_Ctrl_Stat = 0x26,
+ AC97_Extended_Audio_ID = 0x28,
+ AC97_Extended_Audio_Ctrl_Stat = 0x2A,
+ AC97_PCM_Front_DAC_Rate = 0x2C,
+ AC97_PCM_Surround_DAC_Rate = 0x2E,
+ AC97_PCM_LFE_DAC_Rate = 0x30,
+ AC97_PCM_LR_ADC_Rate = 0x32,
+ AC97_MIC_ADC_Rate = 0x34,
+ AC97_6Ch_Vol_C_LFE_Mute = 0x36,
+ AC97_6Ch_Vol_L_R_Surround_Mute = 0x38,
+ AC97_Vendor_Reserved = 0x58,
+ AC97_Sigmatel_Analog = 0x6c, /* We emulate a Sigmatel codec */
+ AC97_Sigmatel_Dac2Invert = 0x6e, /* We emulate a Sigmatel codec */
+ AC97_Vendor_ID1 = 0x7c,
+ AC97_Vendor_ID2 = 0x7e
+};
+
+#define EACS_VRA 1
+#define EACS_VRM 8
+
+#define MUTE_SHIFT 15
+
+#endif /* AC97_H */
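
The new header deliberately carries only the AC97 mixer register offsets and a few shared bits (EACS_VRA, EACS_VRM, MUTE_SHIFT) so that other codec models can include it instead of duplicating the enum. As a minimal sketch of how a consumer might use it; the helper names and the word-indexed register array here are illustrative only, not part of this patch:

    #include "ac97.h"

    /* Hypothetical helpers over a 16-bit, word-indexed mixer register file */
    static bool codec_vra_enabled(const uint16_t *regs)
    {
        /* EACS_VRA: variable rate audio enabled in Extended Audio Ctrl/Stat */
        return regs[AC97_Extended_Audio_Ctrl_Stat / 2] & EACS_VRA;
    }

    static bool master_volume_muted(const uint16_t *regs)
    {
        return regs[AC97_Master_Volume_Mute / 2] & (1u << MUTE_SHIFT);
    }
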
diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c
index 7f17a72a9c..5c6d643732 100644
--- a/hw/audio/cs4231a.c
+++ b/hw/audio/cs4231a.c
@@ -668,16 +668,17 @@ static void cs4231a_initfn (Object *obj)
static void cs4231a_realizefn (DeviceState *dev, Error **errp)
{
ISADevice *d = ISA_DEVICE (dev);
+ ISABus *bus = isa_bus_from_device(d);
CSState *s = CS4231A (dev);
IsaDmaClass *k;
- s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->dma);
+ s->isa_dma = isa_bus_get_dma(bus, s->dma);
if (!s->isa_dma) {
error_setg(errp, "ISA controller does not support DMA");
return;
}
- s->pic = isa_get_irq(d, s->irq);
+ s->pic = isa_bus_get_irq(bus, s->irq);
k = ISADMA_GET_CLASS(s->isa_dma);
k->register_channel(s->isa_dma, s->dma, cs_dma_read, s);
diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c
index 54cc19a637..4f738a0ad8 100644
--- a/hw/audio/es1370.c
+++ b/hw/audio/es1370.c
@@ -256,6 +256,9 @@ static void print_sctl (uint32_t val)
#define lwarn(...)
#endif
+#define TYPE_ES1370 "ES1370"
+OBJECT_DECLARE_SIMPLE_TYPE(ES1370State, ES1370)
+
struct chan {
uint32_t shift;
uint32_t leftover;
@@ -278,7 +281,6 @@ struct ES1370State {
uint32_t codec;
uint32_t sctl;
};
-typedef struct ES1370State ES1370State;
struct chan_bits {
uint32_t ctl_en;
@@ -292,9 +294,6 @@ struct chan_bits {
uint32_t *old_freq, uint32_t *new_freq);
};
-#define TYPE_ES1370 "ES1370"
-OBJECT_DECLARE_SIMPLE_TYPE(ES1370State, ES1370)
-
static void es1370_dac1_calc_freq (ES1370State *s, uint32_t ctl,
uint32_t *old_freq, uint32_t *new_freq);
static void es1370_dac2_and_adc_calc_freq (ES1370State *s, uint32_t ctl,
@@ -844,7 +843,8 @@ static const VMStateDescription vmstate_es1370 = {
static void es1370_on_reset(DeviceState *dev)
{
- ES1370State *s = container_of(dev, ES1370State, dev.qdev);
+ ES1370State *s = ES1370(dev);
+
es1370_reset (s);
}
diff --git a/hw/audio/gus.c b/hw/audio/gus.c
index 42f010b671..787345ce54 100644
--- a/hw/audio/gus.c
+++ b/hw/audio/gus.c
@@ -236,11 +236,12 @@ static const MemoryRegionPortio gus_portio_list2[] = {
static void gus_realizefn (DeviceState *dev, Error **errp)
{
ISADevice *d = ISA_DEVICE(dev);
+ ISABus *bus = isa_bus_from_device(d);
GUSState *s = GUS (dev);
IsaDmaClass *k;
struct audsettings as;
- s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->emu.gusdma);
+ s->isa_dma = isa_bus_get_dma(bus, s->emu.gusdma);
if (!s->isa_dma) {
error_setg(errp, "ISA controller does not support DMA");
return;
@@ -282,7 +283,7 @@ static void gus_realizefn (DeviceState *dev, Error **errp)
s->emu.himemaddr = s->himem;
s->emu.gusdatapos = s->emu.himemaddr + 1024 * 1024 + 32;
s->emu.opaque = s;
- s->pic = isa_get_irq(d, s->emu.gusirq);
+ s->pic = isa_bus_get_irq(bus, s->emu.gusirq);
AUD_set_active_out (s->voice, 1);
}
diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c
index feb8f9e2bb..c51d8ba617 100644
--- a/hw/audio/hda-codec.c
+++ b/hw/audio/hda-codec.c
@@ -145,7 +145,9 @@ static const char *fmt2name[] = {
[ AUDIO_FORMAT_S32 ] = "PCM-S32",
};
-typedef struct HDAAudioState HDAAudioState;
+#define TYPE_HDA_AUDIO "hda-audio"
+OBJECT_DECLARE_SIMPLE_TYPE(HDAAudioState, HDA_AUDIO)
+
typedef struct HDAAudioStream HDAAudioStream;
struct HDAAudioStream {
@@ -171,9 +173,6 @@ struct HDAAudioStream {
int64_t buft_start;
};
-#define TYPE_HDA_AUDIO "hda-audio"
-OBJECT_DECLARE_SIMPLE_TYPE(HDAAudioState, HDA_AUDIO)
-
struct HDAAudioState {
HDACodecDevice hda;
const char *name;
diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c
index 2215386ddb..535ccccdc9 100644
--- a/hw/audio/sb16.c
+++ b/hw/audio/sb16.c
@@ -1398,17 +1398,18 @@ static void sb16_initfn (Object *obj)
static void sb16_realizefn (DeviceState *dev, Error **errp)
{
ISADevice *isadev = ISA_DEVICE (dev);
+ ISABus *bus = isa_bus_from_device(isadev);
SB16State *s = SB16 (dev);
IsaDmaClass *k;
- s->isa_hdma = isa_get_dma(isa_bus_from_device(isadev), s->hdma);
- s->isa_dma = isa_get_dma(isa_bus_from_device(isadev), s->dma);
+ s->isa_hdma = isa_bus_get_dma(bus, s->hdma);
+ s->isa_dma = isa_bus_get_dma(bus, s->dma);
if (!s->isa_dma || !s->isa_hdma) {
error_setg(errp, "ISA controller does not support DMA");
return;
}
- s->pic = isa_get_irq(isadev, s->irq);
+ s->pic = isa_bus_get_irq(bus, s->irq);
s->mixer_regs[0x80] = magic_of_irq (s->irq);
s->mixer_regs[0x81] = (1 << s->dma) | (1 << s->hdma);
diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c
index fee1ca68a8..7ec075e470 100644
--- a/hw/block/fdc-isa.c
+++ b/hw/block/fdc-isa.c
@@ -86,6 +86,7 @@ static const MemoryRegionPortio fdc_portio_list[] = {
static void isabus_fdc_realize(DeviceState *dev, Error **errp)
{
ISADevice *isadev = ISA_DEVICE(dev);
+ ISABus *bus = isa_bus_from_device(isadev);
FDCtrlISABus *isa = ISA_FDC(dev);
FDCtrl *fdctrl = &isa->state;
Error *err = NULL;
@@ -94,11 +95,11 @@ static void isabus_fdc_realize(DeviceState *dev, Error **errp)
isa->iobase, fdc_portio_list, fdctrl,
"fdc");
- fdctrl->irq = isa_get_irq(isadev, isa->irq);
+ fdctrl->irq = isa_bus_get_irq(bus, isa->irq);
fdctrl->dma_chann = isa->dma;
if (fdctrl->dma_chann != -1) {
IsaDmaClass *k;
- fdctrl->dma = isa_get_dma(isa_bus_from_device(isadev), isa->dma);
+ fdctrl->dma = isa_bus_get_dma(bus, isa->dma);
if (!fdctrl->dma) {
error_setg(errp, "ISA controller does not support DMA");
return;
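
The cs4231a, gus, sb16 and fdc-isa hunks above all apply the same mechanical conversion: look up the ISABus once with isa_bus_from_device() and then use the new bus-level accessors in place of the old isa_get_dma()/isa_get_irq() helpers. Condensed into one sketch (FooState/FOO are placeholders, not a real device):

    static void foo_isa_realize(DeviceState *dev, Error **errp)
    {
        ISADevice *isadev = ISA_DEVICE(dev);
        ISABus *bus = isa_bus_from_device(isadev);  /* fetch the bus once */
        FooState *s = FOO(dev);

        /* was: isa_get_dma(isa_bus_from_device(isadev), s->dma) */
        s->isa_dma = isa_bus_get_dma(bus, s->dma);
        if (!s->isa_dma) {
            error_setg(errp, "ISA controller does not support DMA");
            return;
        }

        /* was: isa_get_irq(isadev, s->irq) */
        s->pic = isa_bus_get_irq(bus, s->irq);
    }
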
diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c
index eb5ba1aff7..e03165febf 100644
--- a/hw/core/ptimer.c
+++ b/hw/core/ptimer.c
@@ -10,7 +10,7 @@
#include "hw/ptimer.h"
#include "migration/vmstate.h"
#include "qemu/host-utils.h"
-#include "sysemu/replay.h"
+#include "exec/replay-core.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/qtest.h"
#include "block/aio.h"
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index d759c4602c..43d863b0c5 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -330,7 +330,7 @@ bool qdev_machine_modified(void)
return qdev_hot_added || qdev_hot_removed;
}
-BusState *qdev_get_parent_bus(DeviceState *dev)
+BusState *qdev_get_parent_bus(const DeviceState *dev)
{
return dev->parent_bus;
}
diff --git a/hw/display/sm501.c b/hw/display/sm501.c
index e1d0591d36..17835159fc 100644
--- a/hw/display/sm501.c
+++ b/hw/display/sm501.c
@@ -28,6 +28,7 @@
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
+#include "hw/usb/hcd-ohci.h"
#include "hw/char/serial.h"
#include "ui/console.h"
#include "hw/sysbus.h"
@@ -691,7 +692,7 @@ static void sm501_2d_operation(SM501State *s)
unsigned int dst_pitch = (s->twoD_pitch >> 16) & 0x1FFF;
int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0;
int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt);
- bool overlap = false;
+ bool overlap = false, fallback = false;
if ((s->twoD_stretch >> 16) & 0xF) {
qemu_log_mask(LOG_UNIMP, "sm501: only XY addressing is supported.\n");
@@ -753,7 +754,7 @@ static void sm501_2d_operation(SM501State *s)
}
if ((rop_mode && rop == 0x5) || (!rop_mode && rop == 0x55)) {
- /* Invert dest, is there a way to do this with pixman? */
+ /* DSTINVERT, is there a way to do this with pixman? */
unsigned int x, y, i;
uint8_t *d = s->local_mem + dst_base;
@@ -763,6 +764,34 @@ static void sm501_2d_operation(SM501State *s)
stn_he_p(&d[i], bypp, ~ldn_he_p(&d[i], bypp));
}
}
+ } else if (!rop_mode && rop == 0x99) {
+ /* DSxn, is there a way to do this with pixman? */
+ unsigned int x, y, i, j;
+ uint8_t *sp = s->local_mem + src_base;
+ uint8_t *d = s->local_mem + dst_base;
+
+ for (y = 0; y < height; y++) {
+ i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
+ j = (src_x + (src_y + y) * src_pitch) * bypp;
+ for (x = 0; x < width; x++, i += bypp, j += bypp) {
+ stn_he_p(&d[i], bypp,
+ ~(ldn_he_p(&sp[j], bypp) ^ ldn_he_p(&d[i], bypp)));
+ }
+ }
+ } else if (!rop_mode && rop == 0xee) {
+ /* SRCPAINT, is there a way to do this with pixman? */
+ unsigned int x, y, i, j;
+ uint8_t *sp = s->local_mem + src_base;
+ uint8_t *d = s->local_mem + dst_base;
+
+ for (y = 0; y < height; y++) {
+ i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
+ j = (src_x + (src_y + y) * src_pitch) * bypp;
+ for (x = 0; x < width; x++, i += bypp, j += bypp) {
+ stn_he_p(&d[i], bypp,
+ ldn_he_p(&sp[j], bypp) | ldn_he_p(&d[i], bypp));
+ }
+ }
} else {
/* Do copy src for unimplemented ops, better than unpainted area */
if ((rop_mode && (rop != 0xc || rop2_source_is_pattern)) ||
@@ -806,25 +835,48 @@ static void sm501_2d_operation(SM501State *s)
if (tmp_stride * sizeof(uint32_t) * height > sizeof(tmp_buf)) {
tmp = g_malloc(tmp_stride * sizeof(uint32_t) * height);
}
- pixman_blt((uint32_t *)&s->local_mem[src_base], tmp,
- src_pitch * bypp / sizeof(uint32_t),
- tmp_stride, 8 * bypp, 8 * bypp,
- src_x, src_y, 0, 0, width, height);
- pixman_blt(tmp, (uint32_t *)&s->local_mem[dst_base],
- tmp_stride,
- dst_pitch * bypp / sizeof(uint32_t),
- 8 * bypp, 8 * bypp,
- 0, 0, dst_x, dst_y, width, height);
+ fallback = !pixman_blt((uint32_t *)&s->local_mem[src_base],
+ tmp,
+ src_pitch * bypp / sizeof(uint32_t),
+ tmp_stride,
+ 8 * bypp, 8 * bypp,
+ src_x, src_y, 0, 0, width, height);
+ if (!fallback) {
+ fallback = !pixman_blt(tmp,
+ (uint32_t *)&s->local_mem[dst_base],
+ tmp_stride,
+ dst_pitch * bypp / sizeof(uint32_t),
+ 8 * bypp, 8 * bypp,
+ 0, 0, dst_x, dst_y, width, height);
+ }
if (tmp != tmp_buf) {
g_free(tmp);
}
} else {
- pixman_blt((uint32_t *)&s->local_mem[src_base],
- (uint32_t *)&s->local_mem[dst_base],
- src_pitch * bypp / sizeof(uint32_t),
- dst_pitch * bypp / sizeof(uint32_t),
- 8 * bypp, 8 * bypp,
- src_x, src_y, dst_x, dst_y, width, height);
+ fallback = !pixman_blt((uint32_t *)&s->local_mem[src_base],
+ (uint32_t *)&s->local_mem[dst_base],
+ src_pitch * bypp / sizeof(uint32_t),
+ dst_pitch * bypp / sizeof(uint32_t),
+ 8 * bypp, 8 * bypp, src_x, src_y,
+ dst_x, dst_y, width, height);
+ }
+ if (fallback) {
+ uint8_t *sp = s->local_mem + src_base;
+ uint8_t *d = s->local_mem + dst_base;
+ unsigned int y, i, j;
+ for (y = 0; y < height; y++) {
+ if (overlap) { /* overlap also means rtl */
+ i = (dst_y + height - 1 - y) * dst_pitch;
+ i = (dst_x + i) * bypp;
+ j = (src_y + height - 1 - y) * src_pitch;
+ j = (src_x + j) * bypp;
+ memmove(&d[i], &sp[j], width * bypp);
+ } else {
+ i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
+ j = (src_x + (src_y + y) * src_pitch) * bypp;
+ memcpy(&d[i], &sp[j], width * bypp);
+ }
+ }
}
}
break;
@@ -839,13 +891,19 @@ static void sm501_2d_operation(SM501State *s)
color = cpu_to_le16(color);
}
- if (width == 1 && height == 1) {
- unsigned int i = (dst_x + dst_y * dst_pitch) * bypp;
- stn_he_p(&s->local_mem[dst_base + i], bypp, color);
- } else {
- pixman_fill((uint32_t *)&s->local_mem[dst_base],
- dst_pitch * bypp / sizeof(uint32_t),
- 8 * bypp, dst_x, dst_y, width, height, color);
+ if ((width == 1 && height == 1) ||
+ !pixman_fill((uint32_t *)&s->local_mem[dst_base],
+ dst_pitch * bypp / sizeof(uint32_t), 8 * bypp,
+ dst_x, dst_y, width, height, color)) {
+ /* fallback when pixman failed or we don't want to call it */
+ uint8_t *d = s->local_mem + dst_base;
+ unsigned int x, y, i;
+ for (y = 0; y < height; y++, i += dst_pitch * bypp) {
+ i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
+ for (x = 0; x < width; x++, i += bypp) {
+ stn_he_p(&d[i], bypp, color);
+ }
+ }
}
break;
}
@@ -1943,15 +2001,14 @@ struct SM501SysBusState {
/*< public >*/
SM501State state;
uint32_t vram_size;
- uint32_t base;
SerialMM serial;
+ OHCISysBusState ohci;
};
static void sm501_realize_sysbus(DeviceState *dev, Error **errp)
{
SM501SysBusState *s = SYSBUS_SM501(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- DeviceState *usb_dev;
MemoryRegion *mr;
sm501_init(&s->state, dev, s->vram_size);
@@ -1964,13 +2021,10 @@ static void sm501_realize_sysbus(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->state.mmio_region);
/* bridge to usb host emulation module */
- usb_dev = qdev_new("sysbus-ohci");
- qdev_prop_set_uint32(usb_dev, "num-ports", 2);
- qdev_prop_set_uint64(usb_dev, "dma-offset", s->base);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(usb_dev), &error_fatal);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->ohci), &error_fatal);
memory_region_add_subregion(&s->state.mmio_region, SM501_USB_HOST,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(usb_dev), 0));
- sysbus_pass_irq(sbd, SYS_BUS_DEVICE(usb_dev));
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->ohci), 0));
+ sysbus_pass_irq(sbd, SYS_BUS_DEVICE(&s->ohci));
/* bridge to serial emulation module */
sysbus_realize(SYS_BUS_DEVICE(&s->serial), &error_fatal);
@@ -1981,7 +2035,6 @@ static void sm501_realize_sysbus(DeviceState *dev, Error **errp)
static Property sm501_sysbus_properties[] = {
DEFINE_PROP_UINT32("vram-size", SM501SysBusState, vram_size, 0),
- DEFINE_PROP_UINT32("base", SM501SysBusState, base, 0),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2017,15 +2070,19 @@ static void sm501_sysbus_class_init(ObjectClass *klass, void *data)
static void sm501_sysbus_init(Object *o)
{
SM501SysBusState *sm501 = SYSBUS_SM501(o);
+ OHCISysBusState *ohci = &sm501->ohci;
SerialMM *smm = &sm501->serial;
+ object_initialize_child(o, "ohci", ohci, TYPE_SYSBUS_OHCI);
+ object_property_add_alias(o, "dma-offset", OBJECT(ohci), "dma-offset");
+ qdev_prop_set_uint32(DEVICE(ohci), "num-ports", 2);
+
object_initialize_child(o, "serial", smm, TYPE_SERIAL_MM);
qdev_set_legacy_instance_id(DEVICE(smm), SM501_UART0, 2);
qdev_prop_set_uint8(DEVICE(smm), "regshift", 2);
qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN);
- object_property_add_alias(o, "chardev",
- OBJECT(smm), "chardev");
+ object_property_add_alias(o, "chardev", OBJECT(smm), "chardev");
}
static const TypeInfo sm501_sysbus_info = {
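
Since the OHCI controller is now an embedded child whose "dma-offset" property is aliased onto the SM501, the separate "base" property goes away. Board code that used to set "base" on sysbus-sm501 would presumably set the alias instead (the r2d and sam460ex updates are in the diffstat but not shown in this excerpt); a rough sketch of the assumed board-side change:

    dev = qdev_new("sysbus-sm501");
    qdev_prop_set_uint32(dev, "vram-size", vram_size);
    /* was: qdev_prop_set_uint32(dev, "base", base); */
    qdev_prop_set_uint64(dev, "dma-offset", base);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
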
diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c
index 34c3aaf7d3..63734c22c9 100644
--- a/hw/dma/i82374.c
+++ b/hw/dma/i82374.c
@@ -125,7 +125,7 @@ static void i82374_realize(DeviceState *dev, Error **errp)
I82374State *s = I82374(dev);
ISABus *isa_bus = isa_bus_from_device(ISA_DEVICE(dev));
- if (isa_get_dma(isa_bus, 0)) {
+ if (isa_bus_get_dma(isa_bus, 0)) {
error_setg(errp, "DMA already initialized on ISA bus");
return;
}
diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c
index 7ac68c943f..8fea5fa6b8 100644
--- a/hw/hppa/machine.c
+++ b/hw/hppa/machine.c
@@ -98,7 +98,7 @@ static ISABus *hppa_isa_bus(void)
isa_irqs = i8259_init(isa_bus,
/* qemu_allocate_irq(dino_set_isa_irq, s, 0)); */
NULL);
- isa_bus_irqs(isa_bus, isa_irqs);
+ isa_bus_register_input_irqs(isa_bus, isa_irqs);
return isa_bus;
}
diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c
index 52ba77f3fc..18d40e93c1 100644
--- a/hw/i2c/smbus_ich9.c
+++ b/hw/i2c/smbus_ich9.c
@@ -27,7 +27,7 @@
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "hw/i386/ich9.h"
+#include "hw/southbridge/ich9.h"
#include "qom/object.h"
#include "hw/acpi/acpi_aml_interface.h"
@@ -80,6 +80,18 @@ static void ich9_smbus_write_config(PCIDevice *d, uint32_t address,
}
}
+static void ich9_smb_set_irq(PMSMBus *pmsmb, bool enabled)
+{
+ ICH9SMBState *s = pmsmb->opaque;
+
+ if (enabled == s->irq_enabled) {
+ return;
+ }
+
+ s->irq_enabled = enabled;
+ pci_set_irq(&s->dev, enabled);
+}
+
static void ich9_smbus_realize(PCIDevice *d, Error **errp)
{
ICH9SMBState *s = ICH9_SMB_DEVICE(d);
@@ -93,6 +105,9 @@ static void ich9_smbus_realize(PCIDevice *d, Error **errp)
pm_smbus_init(&d->qdev, &s->smb, false);
pci_register_bar(d, ICH9_SMB_SMB_BASE_BAR, PCI_BASE_ADDRESS_SPACE_IO,
&s->smb.io);
+
+ s->smb.set_irq = ich9_smb_set_irq;
+ s->smb.opaque = s;
}
static void build_ich9_smb_aml(AcpiDevAmlIf *adev, Aml *scope)
@@ -125,28 +140,6 @@ static void ich9_smb_class_init(ObjectClass *klass, void *data)
adevc->build_dev_aml = build_ich9_smb_aml;
}
-static void ich9_smb_set_irq(PMSMBus *pmsmb, bool enabled)
-{
- ICH9SMBState *s = pmsmb->opaque;
-
- if (enabled == s->irq_enabled) {
- return;
- }
-
- s->irq_enabled = enabled;
- pci_set_irq(&s->dev, enabled);
-}
-
-I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base)
-{
- PCIDevice *d =
- pci_create_simple_multifunction(bus, devfn, true, TYPE_ICH9_SMB_DEVICE);
- ICH9SMBState *s = ICH9_SMB_DEVICE(d);
- s->smb.set_irq = ich9_smb_set_irq;
- s->smb.opaque = s;
- return s->smb.smbus;
-}
-
static const TypeInfo ich9_smb_info = {
.name = TYPE_ICH9_SMB_DEVICE,
.parent = TYPE_PCI_DEVICE,
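
With ich9_smb_init() removed and the set_irq wiring moved into realize, callers are expected to instantiate TYPE_ICH9_SMB_DEVICE themselves (the lpc_ich9.c and pc_q35.c updates appear in the diffstat but not in this excerpt). A rough sketch of what a caller might do, assuming the SMBus created by pm_smbus_init() is reachable as the device's "i2c" child bus:

    PCIDevice *d = pci_create_simple_multifunction(pci_bus, devfn, true,
                                                   TYPE_ICH9_SMB_DEVICE);
    I2CBus *smbus = I2C_BUS(qdev_get_child_bus(DEVICE(d), "i2c"));
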
diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig
index 9fbfe748b5..d40802d83f 100644
--- a/hw/i386/Kconfig
+++ b/hw/i386/Kconfig
@@ -136,3 +136,8 @@ config VMPORT
config VMMOUSE
bool
depends on VMPORT
+
+config XEN_EMU
+ bool
+ default y
+ depends on KVM && (I386 || X86_64)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index b67dcbbb37..d27921fd8f 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -55,10 +55,11 @@
#include "hw/hyperv/vmbus-bridge.h"
/* Supported chipsets: */
+#include "hw/southbridge/ich9.h"
#include "hw/southbridge/piix.h"
#include "hw/acpi/pcihp.h"
#include "hw/i386/fw_cfg.h"
-#include "hw/i386/ich9.h"
+#include "hw/i386/pc.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/i440fx.h"
#include "hw/pci-host/q35.h"
diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c
index 272e26b4a2..cd5ea5d60b 100644
--- a/hw/i386/kvm/ioapic.c
+++ b/hw/i386/kvm/ioapic.c
@@ -12,9 +12,8 @@
#include "qemu/osdep.h"
#include "monitor/monitor.h"
-#include "hw/i386/x86.h"
#include "hw/qdev-properties.h"
-#include "hw/i386/ioapic_internal.h"
+#include "hw/intc/ioapic_internal.h"
#include "hw/intc/kvm_irqcount.h"
#include "sysemu/kvm.h"
diff --git a/hw/i386/kvm/meson.build b/hw/i386/kvm/meson.build
index 95467f1ded..82dd6ae7c6 100644
--- a/hw/i386/kvm/meson.build
+++ b/hw/i386/kvm/meson.build
@@ -4,5 +4,18 @@ i386_kvm_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c'))
i386_kvm_ss.add(when: 'CONFIG_I8254', if_true: files('i8254.c'))
i386_kvm_ss.add(when: 'CONFIG_I8259', if_true: files('i8259.c'))
i386_kvm_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c'))
+i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files(
+ 'xen_overlay.c',
+ 'xen_evtchn.c',
+ 'xen_gnttab.c',
+ 'xen_xenstore.c',
+ ))
i386_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss)
+
+xen_stubs_ss = ss.source_set()
+xen_stubs_ss.add(when: 'CONFIG_XEN_EMU', if_false: files(
+ 'xen-stubs.c',
+))
+
+specific_ss.add_all(when: 'CONFIG_SOFTMMU', if_true: xen_stubs_ss)
diff --git a/hw/i386/kvm/trace-events b/hw/i386/kvm/trace-events
new file mode 100644
index 0000000000..b83c3eb965
--- /dev/null
+++ b/hw/i386/kvm/trace-events
@@ -0,0 +1,5 @@
+kvm_xen_map_pirq(int pirq, int gsi) "pirq %d gsi %d"
+kvm_xen_unmap_pirq(int pirq, int gsi) "pirq %d gsi %d"
+kvm_xen_get_free_pirq(int pirq, int type) "pirq %d type %d"
+kvm_xen_bind_pirq(int pirq, int port) "pirq %d port %d"
+kvm_xen_unmask_pirq(int pirq, char *dev, int vector) "pirq %d dev %s vector %d"
diff --git a/hw/i386/kvm/trace.h b/hw/i386/kvm/trace.h
new file mode 100644
index 0000000000..e55d0812fd
--- /dev/null
+++ b/hw/i386/kvm/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_i386_kvm.h"
diff --git a/hw/i386/kvm/xen-stubs.c b/hw/i386/kvm/xen-stubs.c
new file mode 100644
index 0000000000..ae406e0b02
--- /dev/null
+++ b/hw/i386/kvm/xen-stubs.c
@@ -0,0 +1,44 @@
+/*
+ * QEMU Xen emulation: QMP stubs
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-target.h"
+
+#include "xen_evtchn.h"
+
+void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
+ uint64_t addr, uint32_t data, bool is_masked)
+{
+}
+
+void xen_evtchn_remove_pci_device(PCIDevice *dev)
+{
+}
+
+bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
+{
+ return false;
+}
+
+#ifdef TARGET_I386
+EvtchnInfoList *qmp_xen_event_list(Error **errp)
+{
+ error_setg(errp, "Xen event channel emulation not enabled");
+ return NULL;
+}
+
+void qmp_xen_event_inject(uint32_t port, Error **errp)
+{
+ error_setg(errp, "Xen event channel emulation not enabled");
+}
+#endif
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
new file mode 100644
index 0000000000..886fbf6b3b
--- /dev/null
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -0,0 +1,2341 @@
+/*
+ * QEMU Xen emulation: Event channel support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qemu/log.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qmp/qdict.h"
+#include "qom/object.h"
+#include "exec/target_page.h"
+#include "exec/address-spaces.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+#include "hw/sysbus.h"
+#include "hw/xen/xen.h"
+#include "hw/i386/x86.h"
+#include "hw/i386/pc.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/irq.h"
+
+#include "xen_evtchn.h"
+#include "xen_overlay.h"
+#include "xen_xenstore.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+#include <linux/kvm.h>
+#include <sys/eventfd.h>
+
+#include "hw/xen/interface/memory.h"
+#include "hw/xen/interface/hvm/params.h"
+
+/* XX: For kvm_update_msi_routes_all() */
+#include "target/i386/kvm/kvm_i386.h"
+
+#define TYPE_XEN_EVTCHN "xen-evtchn"
+OBJECT_DECLARE_SIMPLE_TYPE(XenEvtchnState, XEN_EVTCHN)
+
+typedef struct XenEvtchnPort {
+ uint32_t vcpu; /* Xen/ACPI vcpu_id */
+ uint16_t type; /* EVTCHNSTAT_xxxx */
+ uint16_t type_val; /* pirq# / virq# / remote port according to type */
+} XenEvtchnPort;
+
+/* 32-bit compatibility definitions, also used natively in 32-bit build */
+struct compat_arch_vcpu_info {
+ unsigned int cr2;
+ unsigned int pad[5];
+};
+
+struct compat_vcpu_info {
+ uint8_t evtchn_upcall_pending;
+ uint8_t evtchn_upcall_mask;
+ uint16_t pad;
+ uint32_t evtchn_pending_sel;
+ struct compat_arch_vcpu_info arch;
+ struct vcpu_time_info time;
+}; /* 64 bytes (x86) */
+
+struct compat_arch_shared_info {
+ unsigned int max_pfn;
+ unsigned int pfn_to_mfn_frame_list_list;
+ unsigned int nmi_reason;
+ unsigned int p2m_cr3;
+ unsigned int p2m_vaddr;
+ unsigned int p2m_generation;
+ uint32_t wc_sec_hi;
+};
+
+struct compat_shared_info {
+ struct compat_vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
+ uint32_t evtchn_pending[32];
+ uint32_t evtchn_mask[32];
+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
+ uint32_t wc_sec;
+ uint32_t wc_nsec;
+ struct compat_arch_shared_info arch;
+};
+
+#define COMPAT_EVTCHN_2L_NR_CHANNELS 1024
+
+/* Local private implementation of struct xenevtchn_handle */
+struct xenevtchn_handle {
+ evtchn_port_t be_port;
+ evtchn_port_t guest_port; /* Or zero for unbound */
+ int fd;
+};
+
+/*
+ * For unbound/interdomain ports there are only two possible remote
+ * domains; self and QEMU. Use a single high bit in type_val for that,
+ * and the low bits for the remote port number (or 0 for unbound).
+ */
+#define PORT_INFO_TYPEVAL_REMOTE_QEMU 0x8000
+#define PORT_INFO_TYPEVAL_REMOTE_PORT_MASK 0x7FFF
+
+/*
+ * These 'emuirq' values are used by Xen in the LM stream... and yes, I am
+ * insane enough to think about guest-transparent live migration from actual
+ * Xen to QEMU, and ensuring that we can convert/consume the stream.
+ */
+#define IRQ_UNBOUND -1
+#define IRQ_PT -2
+#define IRQ_MSI_EMU -3
+
+
+struct pirq_info {
+ int gsi;
+ uint16_t port;
+ PCIDevice *dev;
+ int vector;
+ bool is_msix;
+ bool is_masked;
+ bool is_translated;
+};
+
+struct XenEvtchnState {
+ /*< private >*/
+ SysBusDevice busdev;
+ /*< public >*/
+
+ uint64_t callback_param;
+ bool evtchn_in_kernel;
+ uint32_t callback_gsi;
+
+ QEMUBH *gsi_bh;
+
+ QemuMutex port_lock;
+ uint32_t nr_ports;
+ XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
+ qemu_irq gsis[IOAPIC_NUM_PINS];
+
+ struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS];
+
+ uint32_t nr_pirqs;
+
+ /* Bitmap of allocated PIRQs (serialized) */
+ uint16_t nr_pirq_inuse_words;
+ uint64_t *pirq_inuse_bitmap;
+
+ /* GSI → PIRQ mapping (serialized) */
+ uint16_t gsi_pirq[IOAPIC_NUM_PINS];
+
+ /* Per-GSI assertion state (serialized) */
+ uint32_t pirq_gsi_set;
+
+ /* Per-PIRQ information (rebuilt on migration, protected by BQL) */
+ struct pirq_info *pirq;
+};
+
+#define pirq_inuse_word(s, pirq) (s->pirq_inuse_bitmap[((pirq) / 64)])
+#define pirq_inuse_bit(pirq) (1ULL << ((pirq) & 63))
+
+#define pirq_inuse(s, pirq) (pirq_inuse_word(s, pirq) & pirq_inuse_bit(pirq))
+
+struct XenEvtchnState *xen_evtchn_singleton;
+
+/* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */
+#define CALLBACK_VIA_TYPE_SHIFT 56
+
+static void unbind_backend_ports(XenEvtchnState *s);
+
+static int xen_evtchn_pre_load(void *opaque)
+{
+ XenEvtchnState *s = opaque;
+
+ /* Unbind all the backend-side ports; they need to rebind */
+ unbind_backend_ports(s);
+
+ /* It'll be leaked otherwise. */
+ g_free(s->pirq_inuse_bitmap);
+ s->pirq_inuse_bitmap = NULL;
+
+ return 0;
+}
+
+static int xen_evtchn_post_load(void *opaque, int version_id)
+{
+ XenEvtchnState *s = opaque;
+ uint32_t i;
+
+ if (s->callback_param) {
+ xen_evtchn_set_callback_param(s->callback_param);
+ }
+
+ /* Rebuild s->pirq[].port mapping */
+ for (i = 0; i < s->nr_ports; i++) {
+ XenEvtchnPort *p = &s->port_table[i];
+
+ if (p->type == EVTCHNSTAT_pirq) {
+ assert(p->type_val);
+ assert(p->type_val < s->nr_pirqs);
+
+ /*
+ * Set the gsi to IRQ_UNBOUND; it may be changed to an actual
+ * GSI# below, or to IRQ_MSI_EMU when the MSI table snooping
+ * catches up with it.
+ */
+ s->pirq[p->type_val].gsi = IRQ_UNBOUND;
+ s->pirq[p->type_val].port = i;
+ }
+ }
+ /* Rebuild s->pirq[].gsi mapping */
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ if (s->gsi_pirq[i]) {
+ s->pirq[s->gsi_pirq[i]].gsi = i;
+ }
+ }
+ return 0;
+}
+
+static bool xen_evtchn_is_needed(void *opaque)
+{
+ return xen_mode == XEN_EMULATE;
+}
+
+static const VMStateDescription xen_evtchn_port_vmstate = {
+ .name = "xen_evtchn_port",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(vcpu, XenEvtchnPort),
+ VMSTATE_UINT16(type, XenEvtchnPort),
+ VMSTATE_UINT16(type_val, XenEvtchnPort),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription xen_evtchn_vmstate = {
+ .name = "xen_evtchn",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xen_evtchn_is_needed,
+ .pre_load = xen_evtchn_pre_load,
+ .post_load = xen_evtchn_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(callback_param, XenEvtchnState),
+ VMSTATE_UINT32(nr_ports, XenEvtchnState),
+ VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1,
+ xen_evtchn_port_vmstate, XenEvtchnPort),
+ VMSTATE_UINT16_ARRAY(gsi_pirq, XenEvtchnState, IOAPIC_NUM_PINS),
+ VMSTATE_VARRAY_UINT16_ALLOC(pirq_inuse_bitmap, XenEvtchnState,
+ nr_pirq_inuse_words, 0,
+ vmstate_info_uint64, uint64_t),
+ VMSTATE_UINT32(pirq_gsi_set, XenEvtchnState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void xen_evtchn_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &xen_evtchn_vmstate;
+}
+
+static const TypeInfo xen_evtchn_info = {
+ .name = TYPE_XEN_EVTCHN,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XenEvtchnState),
+ .class_init = xen_evtchn_class_init,
+};
+
+static void gsi_assert_bh(void *opaque)
+{
+ struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
+ if (vi) {
+ xen_evtchn_set_callback_level(!!vi->evtchn_upcall_pending);
+ }
+}
+
+void xen_evtchn_create(void)
+{
+ XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN,
+ -1, NULL));
+ int i;
+
+ xen_evtchn_singleton = s;
+
+ qemu_mutex_init(&s->port_lock);
+ s->gsi_bh = aio_bh_new(qemu_get_aio_context(), gsi_assert_bh, s);
+
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->gsis[i]);
+ }
+
+ /*
+ * The Xen scheme for encoding PIRQ# into an MSI message is not
+ * compatible with 32-bit MSI, as it puts the high bits of the
+ * PIRQ# into the high bits of the MSI message address, instead of
+ * using the Extended Destination ID in address bits 4-11 which
+ * perhaps would have been a better choice.
+ *
+ * To keep life simple, kvm_accel_instance_init() initialises the
+ * default to 256, which conveniently doesn't need to set anything
+ * outside the low 32 bits of the address. It can be increased by
+ * setting the xen-evtchn-max-pirq property.
+ */
+ s->nr_pirqs = kvm_xen_get_evtchn_max_pirq();
+
+ s->nr_pirq_inuse_words = DIV_ROUND_UP(s->nr_pirqs, 64);
+ s->pirq_inuse_bitmap = g_new0(uint64_t, s->nr_pirq_inuse_words);
+ s->pirq = g_new0(struct pirq_info, s->nr_pirqs);
+}
+
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int i;
+
+ if (!s) {
+ return;
+ }
+
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]);
+ }
+}
+
+static void xen_evtchn_register_types(void)
+{
+ type_register_static(&xen_evtchn_info);
+}
+
+type_init(xen_evtchn_register_types)
+
+static int set_callback_pci_intx(XenEvtchnState *s, uint64_t param)
+{
+ PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+ uint8_t pin = param & 3;
+ uint8_t devfn = (param >> 8) & 0xff;
+ uint16_t bus = (param >> 16) & 0xffff;
+ uint16_t domain = (param >> 32) & 0xffff;
+ PCIDevice *pdev;
+ PCIINTxRoute r;
+
+ if (domain || !pcms) {
+ return 0;
+ }
+
+ pdev = pci_find_device(pcms->bus, bus, devfn);
+ if (!pdev) {
+ return 0;
+ }
+
+ r = pci_device_route_intx_to_irq(pdev, pin);
+ if (r.mode != PCI_INTX_ENABLED) {
+ return 0;
+ }
+
+ /*
+ * Hm, can we be notified of INTX routing changes? Not without
+ * *owning* the device and being allowed to overwrite its own
+ * ->intx_routing_notifier, AFAICT. So let's not.
+ */
+ return r.irq;
+}
+
+void xen_evtchn_set_callback_level(int level)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ if (!s) {
+ return;
+ }
+
+ /*
+ * We get to this function in a number of ways:
+ *
+ * • From I/O context, via PV backend drivers sending a notification to
+ * the guest.
+ *
+ * • From guest vCPU context, via loopback interdomain event channels
+ * (or theoretically even IPIs but guests don't use those with GSI
+ * delivery because that's pointless. We don't want a malicious guest
+ * to be able to trigger a deadlock though, so we can't rule it out.)
+ *
+ * • From guest vCPU context when the HVM_PARAM_CALLBACK_IRQ is being
+ * configured.
+ *
+ * • From guest vCPU context in the KVM exit handler, if the upcall
+ * pending flag has been cleared and the GSI needs to be deasserted.
+ *
+ * • Maybe in future, in an interrupt ack/eoi notifier when the GSI has
+ * been acked in the irqchip.
+ *
+ * Whichever context we come from, if we aren't already holding the BQL
+ * then we can't take it now, as we may already hold s->port_lock. So
+ * trigger the BH to set the IRQ for us instead of doing it immediately.
+ *
+ * In the HVM_PARAM_CALLBACK_IRQ and KVM exit handler cases, the caller
+ * will deliberately take the BQL because they want the change to take
+ * effect immediately. That just leaves interdomain loopback as the case
+ * which uses the BH.
+ */
+ if (!qemu_mutex_iothread_locked()) {
+ qemu_bh_schedule(s->gsi_bh);
+ return;
+ }
+
+ if (s->callback_gsi && s->callback_gsi < IOAPIC_NUM_PINS) {
+ qemu_set_irq(s->gsis[s->callback_gsi], level);
+ if (level) {
+ /* Ensure the vCPU polls for deassertion */
+ kvm_xen_set_callback_asserted();
+ }
+ }
+}
+
+int xen_evtchn_set_callback_param(uint64_t param)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ struct kvm_xen_hvm_attr xa = {
+ .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
+ .u.vector = 0,
+ };
+ bool in_kernel = false;
+ uint32_t gsi = 0;
+ int type = param >> CALLBACK_VIA_TYPE_SHIFT;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ /*
+ * We need the BQL because set_callback_pci_intx() may call into PCI code,
+ * and because we may need to manipulate the old and new GSI levels.
+ */
+ assert(qemu_mutex_iothread_locked());
+ qemu_mutex_lock(&s->port_lock);
+
+ switch (type) {
+ case HVM_PARAM_CALLBACK_TYPE_VECTOR: {
+ xa.u.vector = (uint8_t)param,
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+ if (!ret && kvm_xen_has_cap(EVTCHN_SEND)) {
+ in_kernel = true;
+ }
+ gsi = 0;
+ break;
+ }
+
+ case HVM_PARAM_CALLBACK_TYPE_PCI_INTX:
+ gsi = set_callback_pci_intx(s, param);
+ ret = gsi ? 0 : -EINVAL;
+ break;
+
+ case HVM_PARAM_CALLBACK_TYPE_GSI:
+ gsi = (uint32_t)param;
+ ret = 0;
+ break;
+
+ default:
+ /* Xen doesn't return error even if you set something bogus */
+ ret = 0;
+ break;
+ }
+
+ if (!ret) {
+ /* If vector delivery was turned *off* then tell the kernel */
+ if ((s->callback_param >> CALLBACK_VIA_TYPE_SHIFT) ==
+ HVM_PARAM_CALLBACK_TYPE_VECTOR && !xa.u.vector) {
+ kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+ }
+ s->callback_param = param;
+ s->evtchn_in_kernel = in_kernel;
+
+ if (gsi != s->callback_gsi) {
+ struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
+
+ xen_evtchn_set_callback_level(0);
+ s->callback_gsi = gsi;
+
+ if (gsi && vi && vi->evtchn_upcall_pending) {
+ kvm_xen_inject_vcpu_callback_vector(0, type);
+ }
+ }
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
+{
+ int type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;
+
+ kvm_xen_inject_vcpu_callback_vector(vcpu, type);
+}
+
+static void deassign_kernel_port(evtchn_port_t port)
+{
+ struct kvm_xen_hvm_attr ha;
+ int ret;
+
+ ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+ ha.u.evtchn.send_port = port;
+ ha.u.evtchn.flags = KVM_XEN_EVTCHN_DEASSIGN;
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Failed to unbind kernel port %d: %s\n",
+ port, strerror(ret));
+ }
+}
+
+static int assign_kernel_port(uint16_t type, evtchn_port_t port,
+ uint32_t vcpu_id)
+{
+ CPUState *cpu = qemu_get_cpu(vcpu_id);
+ struct kvm_xen_hvm_attr ha;
+
+ if (!cpu) {
+ return -ENOENT;
+ }
+
+ ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+ ha.u.evtchn.send_port = port;
+ ha.u.evtchn.type = type;
+ ha.u.evtchn.flags = 0;
+ ha.u.evtchn.deliver.port.port = port;
+ ha.u.evtchn.deliver.port.vcpu = kvm_arch_vcpu_id(cpu);
+ ha.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+
+ return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
+}
+
+static int assign_kernel_eventfd(uint16_t type, evtchn_port_t port, int fd)
+{
+ struct kvm_xen_hvm_attr ha;
+
+ ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+ ha.u.evtchn.send_port = port;
+ ha.u.evtchn.type = type;
+ ha.u.evtchn.flags = 0;
+ ha.u.evtchn.deliver.eventfd.port = 0;
+ ha.u.evtchn.deliver.eventfd.fd = fd;
+
+ return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
+}
+
+static bool valid_port(evtchn_port_t port)
+{
+ if (!port) {
+ return false;
+ }
+
+ if (xen_is_long_mode()) {
+ return port < EVTCHN_2L_NR_CHANNELS;
+ } else {
+ return port < COMPAT_EVTCHN_2L_NR_CHANNELS;
+ }
+}
+
+static bool valid_vcpu(uint32_t vcpu)
+{
+ return !!qemu_get_cpu(vcpu);
+}
+
+static void unbind_backend_ports(XenEvtchnState *s)
+{
+ XenEvtchnPort *p;
+ int i;
+
+ for (i = 1; i < s->nr_ports; i++) {
+ p = &s->port_table[i];
+ if (p->type == EVTCHNSTAT_interdomain &&
+ (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU)) {
+ evtchn_port_t be_port = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+
+ if (s->be_handles[be_port]) {
+ /* This part will be overwritten on the load anyway. */
+ p->type = EVTCHNSTAT_unbound;
+ p->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+
+ /* Leave the backend port open and unbound too. */
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ deassign_kernel_port(i);
+ }
+ s->be_handles[be_port]->guest_port = 0;
+ }
+ }
+ }
+}
+
+int xen_evtchn_status_op(struct evtchn_status *status)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ XenEvtchnPort *p;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (status->dom != DOMID_SELF && status->dom != xen_domid) {
+ return -ESRCH;
+ }
+
+ if (!valid_port(status->port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ p = &s->port_table[status->port];
+
+ status->status = p->type;
+ status->vcpu = p->vcpu;
+
+ switch (p->type) {
+ case EVTCHNSTAT_unbound:
+ if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+ status->u.unbound.dom = DOMID_QEMU;
+ } else {
+ status->u.unbound.dom = xen_domid;
+ }
+ break;
+
+ case EVTCHNSTAT_interdomain:
+ if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+ status->u.interdomain.dom = DOMID_QEMU;
+ } else {
+ status->u.interdomain.dom = xen_domid;
+ }
+
+ status->u.interdomain.port = p->type_val &
+ PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+ break;
+
+ case EVTCHNSTAT_pirq:
+ status->u.pirq = p->type_val;
+ break;
+
+ case EVTCHNSTAT_virq:
+ status->u.virq = p->type_val;
+ break;
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+ return 0;
+}
+
+/*
+ * Never thought I'd hear myself say this, but C++ templates would be
+ * kind of nice here.
+ *
+ * template<class T> static int do_unmask_port(T *shinfo, ...);
+ */
+static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
+ bool do_unmask, struct shared_info *shinfo,
+ struct vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ if (do_unmask) {
+ /*
+ * If this is a true unmask operation, clear the mask bit. If
+ * it was already unmasked, we have nothing further to do.
+ */
+ if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+ return 0;
+ }
+ } else {
+ /*
+ * This is a pseudo-unmask for affinity changes. We don't
+ * change the mask bit, and if it's *masked* we have nothing
+ * else to do.
+ */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+ }
+
+ /* If the event was not pending, we're done. */
+ if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
+static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
+ bool do_unmask,
+ struct compat_shared_info *shinfo,
+ struct compat_vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ if (do_unmask) {
+ /*
+ * If this is a true unmask operation, clear the mask bit. If
+ * it was already unmasked, we have nothing further to do.
+ */
+ if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
+ return 0;
+ }
+ } else {
+ /*
+ * This is a pseudo-unmask for affinity changes. We don't
+ * change the mask bit, and if it's *masked* we have nothing
+ * else to do.
+ */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+ }
+
+ /* If the event was not pending, we're done. */
+ if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
+static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
+{
+ void *vcpu_info, *shinfo;
+
+ if (s->port_table[port].type == EVTCHNSTAT_closed) {
+ return -EINVAL;
+ }
+
+ shinfo = xen_overlay_get_shinfo_ptr();
+ if (!shinfo) {
+ return -ENOTSUP;
+ }
+
+ vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
+ if (!vcpu_info) {
+ return -EINVAL;
+ }
+
+ if (xen_is_long_mode()) {
+ return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info);
+ } else {
+ return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info);
+ }
+}
+
+static int do_set_port_lm(XenEvtchnState *s, evtchn_port_t port,
+ struct shared_info *shinfo,
+ struct vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ /* Update the pending bit itself. If it was already set, we're done. */
+ if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
+ return 0;
+ }
+
+ /* Check if it's masked. */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
+static int do_set_port_compat(XenEvtchnState *s, evtchn_port_t port,
+ struct compat_shared_info *shinfo,
+ struct compat_vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ /* Update the pending bit itself. If it was already set, we're done. */
+ if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
+ return 0;
+ }
+
+ /* Check if it's masked. */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
+static int set_port_pending(XenEvtchnState *s, evtchn_port_t port)
+{
+ void *vcpu_info, *shinfo;
+
+ if (s->port_table[port].type == EVTCHNSTAT_closed) {
+ return -EINVAL;
+ }
+
+ if (s->evtchn_in_kernel) {
+ XenEvtchnPort *p = &s->port_table[port];
+ CPUState *cpu = qemu_get_cpu(p->vcpu);
+ struct kvm_irq_routing_xen_evtchn evt;
+
+ if (!cpu) {
+ return 0;
+ }
+
+ evt.port = port;
+ evt.vcpu = kvm_arch_vcpu_id(cpu);
+ evt.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+
+ return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_EVTCHN_SEND, &evt);
+ }
+
+ shinfo = xen_overlay_get_shinfo_ptr();
+ if (!shinfo) {
+ return -ENOTSUP;
+ }
+
+ vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
+ if (!vcpu_info) {
+ return -EINVAL;
+ }
+
+ if (xen_is_long_mode()) {
+ return do_set_port_lm(s, port, shinfo, vcpu_info);
+ } else {
+ return do_set_port_compat(s, port, shinfo, vcpu_info);
+ }
+}
+
+static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port)
+{
+ void *p = xen_overlay_get_shinfo_ptr();
+
+ if (!p) {
+ return -ENOTSUP;
+ }
+
+ if (xen_is_long_mode()) {
+ struct shared_info *shinfo = p;
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask);
+ } else {
+ struct compat_shared_info *shinfo = p;
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask);
+ }
+ return 0;
+}
+
+static void free_port(XenEvtchnState *s, evtchn_port_t port)
+{
+ s->port_table[port].type = EVTCHNSTAT_closed;
+ s->port_table[port].type_val = 0;
+ s->port_table[port].vcpu = 0;
+
+ if (s->nr_ports == port + 1) {
+ do {
+ s->nr_ports--;
+ } while (s->nr_ports &&
+ s->port_table[s->nr_ports - 1].type == EVTCHNSTAT_closed);
+ }
+
+ /* Clear pending event to avoid unexpected behavior on re-bind. */
+ clear_port_pending(s, port);
+}
+
+static int allocate_port(XenEvtchnState *s, uint32_t vcpu, uint16_t type,
+ uint16_t val, evtchn_port_t *port)
+{
+ evtchn_port_t p = 1;
+
+ for (p = 1; valid_port(p); p++) {
+ if (s->port_table[p].type == EVTCHNSTAT_closed) {
+ s->port_table[p].vcpu = vcpu;
+ s->port_table[p].type = type;
+ s->port_table[p].type_val = val;
+
+ *port = p;
+
+ if (s->nr_ports < p + 1) {
+ s->nr_ports = p + 1;
+ }
+
+ return 0;
+ }
+ }
+ return -ENOSPC;
+}
+
+static bool virq_is_global(uint32_t virq)
+{
+ switch (virq) {
+ case VIRQ_TIMER:
+ case VIRQ_DEBUG:
+ case VIRQ_XENOPROF:
+ case VIRQ_XENPMU:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+static int close_port(XenEvtchnState *s, evtchn_port_t port,
+ bool *flush_kvm_routes)
+{
+ XenEvtchnPort *p = &s->port_table[port];
+
+ /* Because it *might* be a PIRQ port */
+ assert(qemu_mutex_iothread_locked());
+
+ switch (p->type) {
+ case EVTCHNSTAT_closed:
+ return -ENOENT;
+
+ case EVTCHNSTAT_pirq:
+ s->pirq[p->type_val].port = 0;
+ if (s->pirq[p->type_val].is_translated) {
+ *flush_kvm_routes = true;
+ }
+ break;
+
+ case EVTCHNSTAT_virq:
+ kvm_xen_set_vcpu_virq(virq_is_global(p->type_val) ? 0 : p->vcpu,
+ p->type_val, 0);
+ break;
+
+ case EVTCHNSTAT_ipi:
+ if (s->evtchn_in_kernel) {
+ deassign_kernel_port(port);
+ }
+ break;
+
+ case EVTCHNSTAT_interdomain:
+ if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+ uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+ struct xenevtchn_handle *xc = s->be_handles[be_port];
+ if (xc) {
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ deassign_kernel_port(port);
+ }
+ xc->guest_port = 0;
+ }
+ } else {
+ /* Loopback interdomain */
+ XenEvtchnPort *rp = &s->port_table[p->type_val];
+ if (!valid_port(p->type_val) || rp->type_val != port ||
+ rp->type != EVTCHNSTAT_interdomain) {
+ error_report("Inconsistent state for interdomain unbind");
+ } else {
+ /* Set the other end back to unbound */
+ rp->type = EVTCHNSTAT_unbound;
+ rp->type_val = 0;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ free_port(s, port);
+ return 0;
+}
+
+int xen_evtchn_soft_reset(void)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+    bool flush_kvm_routes = false;
+ int i;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ assert(qemu_mutex_iothread_locked());
+
+ qemu_mutex_lock(&s->port_lock);
+
+ for (i = 0; i < s->nr_ports; i++) {
+ close_port(s, i, &flush_kvm_routes);
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ if (flush_kvm_routes) {
+ kvm_update_msi_routes_all(NULL, true, 0, 0);
+ }
+
+ return 0;
+}
+
+int xen_evtchn_reset_op(struct evtchn_reset *reset)
+{
+ if (reset->dom != DOMID_SELF && reset->dom != xen_domid) {
+ return -ESRCH;
+ }
+
+ return xen_evtchn_soft_reset();
+}
+
+int xen_evtchn_close_op(struct evtchn_close *close)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ bool flush_kvm_routes = false;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(close->port)) {
+ return -EINVAL;
+ }
+
+ QEMU_IOTHREAD_LOCK_GUARD();
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = close_port(s, close->port, &flush_kvm_routes);
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ if (flush_kvm_routes) {
+ kvm_update_msi_routes_all(NULL, true, 0, 0);
+ }
+
+ return ret;
+}
+
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(unmask->port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = unmask_port(s, unmask->port, true);
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_evtchn_bind_vcpu_op(struct evtchn_bind_vcpu *vcpu)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ XenEvtchnPort *p;
+ int ret = -EINVAL;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(vcpu->port)) {
+ return -EINVAL;
+ }
+
+ if (!valid_vcpu(vcpu->vcpu)) {
+ return -ENOENT;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ p = &s->port_table[vcpu->port];
+
+ if (p->type == EVTCHNSTAT_interdomain ||
+ p->type == EVTCHNSTAT_unbound ||
+ p->type == EVTCHNSTAT_pirq ||
+ (p->type == EVTCHNSTAT_virq && virq_is_global(p->type_val))) {
+ /*
+ * unmask_port() with do_unmask==false will just raise the event
+ * on the new vCPU if the port was already pending.
+ */
+ p->vcpu = vcpu->vcpu;
+ unmask_port(s, vcpu->port, false);
+ ret = 0;
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (virq->virq >= NR_VIRQS) {
+ return -EINVAL;
+ }
+
+ /* Global VIRQ must be allocated on vCPU0 first */
+ if (virq_is_global(virq->virq) && virq->vcpu != 0) {
+ return -EINVAL;
+ }
+
+ if (!valid_vcpu(virq->vcpu)) {
+ return -ENOENT;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = allocate_port(s, virq->vcpu, EVTCHNSTAT_virq, virq->virq,
+ &virq->port);
+ if (!ret) {
+ ret = kvm_xen_set_vcpu_virq(virq->vcpu, virq->virq, virq->port);
+ if (ret) {
+ free_port(s, virq->port);
+ }
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (pirq->pirq >= s->nr_pirqs) {
+ return -EINVAL;
+ }
+
+ QEMU_IOTHREAD_LOCK_GUARD();
+
+ if (s->pirq[pirq->pirq].port) {
+ return -EBUSY;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = allocate_port(s, 0, EVTCHNSTAT_pirq, pirq->pirq,
+ &pirq->port);
+ if (ret) {
+ qemu_mutex_unlock(&s->port_lock);
+ return ret;
+ }
+
+ s->pirq[pirq->pirq].port = pirq->port;
+ trace_kvm_xen_bind_pirq(pirq->pirq, pirq->port);
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ /*
+ * Need to do the unmask outside port_lock because it may call
+ * back into the MSI translate function.
+ */
+ if (s->pirq[pirq->pirq].gsi == IRQ_MSI_EMU) {
+ if (s->pirq[pirq->pirq].is_masked) {
+ PCIDevice *dev = s->pirq[pirq->pirq].dev;
+ int vector = s->pirq[pirq->pirq].vector;
+ char *dev_path = qdev_get_dev_path(DEVICE(dev));
+
+ trace_kvm_xen_unmask_pirq(pirq->pirq, dev_path, vector);
+ g_free(dev_path);
+
+ if (s->pirq[pirq->pirq].is_msix) {
+ msix_set_mask(dev, vector, false);
+ } else {
+ msi_set_mask(dev, vector, false, NULL);
+ }
+ } else if (s->pirq[pirq->pirq].is_translated) {
+ /*
+ * If KVM had attempted to translate this one before, make it try
+ * again. If we unmasked, then the notifier on the MSI(-X) vector
+ * will already have had the same effect.
+ */
+ kvm_update_msi_routes_all(NULL, true, 0, 0);
+ }
+ }
+
+ return ret;
+}
+
+int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_vcpu(ipi->vcpu)) {
+ return -ENOENT;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = allocate_port(s, ipi->vcpu, EVTCHNSTAT_ipi, 0, &ipi->port);
+ if (!ret && s->evtchn_in_kernel) {
+ assign_kernel_port(EVTCHNSTAT_ipi, ipi->port, ipi->vcpu);
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ uint16_t type_val;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (interdomain->remote_dom == DOMID_QEMU) {
+ type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+ } else if (interdomain->remote_dom == DOMID_SELF ||
+ interdomain->remote_dom == xen_domid) {
+ type_val = 0;
+ } else {
+ return -ESRCH;
+ }
+
+ if (!valid_port(interdomain->remote_port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ /* The newly allocated port starts out as unbound */
+ ret = allocate_port(s, 0, EVTCHNSTAT_unbound, type_val,
+ &interdomain->local_port);
+ if (ret) {
+ goto out;
+ }
+
+ if (interdomain->remote_dom == DOMID_QEMU) {
+ struct xenevtchn_handle *xc = s->be_handles[interdomain->remote_port];
+ XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
+
+ if (!xc) {
+ ret = -ENOENT;
+ goto out_free_port;
+ }
+
+ if (xc->guest_port) {
+ ret = -EBUSY;
+ goto out_free_port;
+ }
+
+ assert(xc->be_port == interdomain->remote_port);
+ xc->guest_port = interdomain->local_port;
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd);
+ }
+ lp->type = EVTCHNSTAT_interdomain;
+ lp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU | interdomain->remote_port;
+ ret = 0;
+ } else {
+ /* Loopback */
+ XenEvtchnPort *rp = &s->port_table[interdomain->remote_port];
+ XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
+
+ if (rp->type == EVTCHNSTAT_unbound && rp->type_val == 0) {
+ /* It's a match! */
+ rp->type = EVTCHNSTAT_interdomain;
+ rp->type_val = interdomain->local_port;
+
+ lp->type = EVTCHNSTAT_interdomain;
+ lp->type_val = interdomain->remote_port;
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+ out_free_port:
+ if (ret) {
+ free_port(s, interdomain->local_port);
+ }
+ out:
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ uint16_t type_val;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (alloc->dom != DOMID_SELF && alloc->dom != xen_domid) {
+ return -ESRCH;
+ }
+
+ if (alloc->remote_dom == DOMID_QEMU) {
+ type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+ } else if (alloc->remote_dom == DOMID_SELF ||
+ alloc->remote_dom == xen_domid) {
+ type_val = 0;
+ } else {
+ return -EPERM;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = allocate_port(s, 0, EVTCHNSTAT_unbound, type_val, &alloc->port);
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_evtchn_send_op(struct evtchn_send *send)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ XenEvtchnPort *p;
+ int ret = 0;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(send->port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ p = &s->port_table[send->port];
+
+ switch (p->type) {
+ case EVTCHNSTAT_interdomain:
+ if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
+ /*
+ * This is an event from the guest to qemu itself, which is
+ * serving as the driver domain.
+ */
+ uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+ struct xenevtchn_handle *xc = s->be_handles[be_port];
+ if (xc) {
+ eventfd_write(xc->fd, 1);
+ ret = 0;
+ } else {
+ ret = -ENOENT;
+ }
+ } else {
+ /* Loopback interdomain ports; just a complex IPI */
+ set_port_pending(s, p->type_val);
+ }
+ break;
+
+ case EVTCHNSTAT_ipi:
+ set_port_pending(s, send->port);
+ break;
+
+ case EVTCHNSTAT_unbound:
+ /* Xen will silently drop these */
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_evtchn_set_port(uint16_t port)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ XenEvtchnPort *p;
+ int ret = -EINVAL;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ p = &s->port_table[port];
+
+ /* QEMU has no business sending to anything but these */
+ if (p->type == EVTCHNSTAT_virq ||
+ (p->type == EVTCHNSTAT_interdomain &&
+ (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU))) {
+ set_port_pending(s, port);
+ ret = 0;
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+static int allocate_pirq(XenEvtchnState *s, int type, int gsi)
+{
+ uint16_t pirq;
+
+ /*
+ * Preserve the allocation strategy that Xen has. It looks like
+ * we *never* give out PIRQ 0-15, we give out 16-nr_irqs_gsi only
+ * to GSIs (counting up from 16), and then we count backwards from
+ * the top for MSIs or when the GSI space is exhausted.
+ */
+ if (type == MAP_PIRQ_TYPE_GSI) {
+ for (pirq = 16 ; pirq < IOAPIC_NUM_PINS; pirq++) {
+ if (pirq_inuse(s, pirq)) {
+ continue;
+ }
+
+ /* Found it */
+ goto found;
+ }
+ }
+ for (pirq = s->nr_pirqs - 1; pirq >= IOAPIC_NUM_PINS; pirq--) {
+ /* Skip whole words at a time when they're full */
+ if (pirq_inuse_word(s, pirq) == UINT64_MAX) {
+ pirq &= ~63ULL;
+ continue;
+ }
+ if (pirq_inuse(s, pirq)) {
+ continue;
+ }
+
+ goto found;
+ }
+ return -ENOSPC;
+
+ found:
+ pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
+ if (gsi >= 0) {
+        assert(gsi < IOAPIC_NUM_PINS);
+ s->gsi_pirq[gsi] = pirq;
+ }
+ s->pirq[pirq].gsi = gsi;
+ return pirq;
+}
+
+bool xen_evtchn_set_gsi(int gsi, int level)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int pirq;
+
+ assert(qemu_mutex_iothread_locked());
+
+    if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
+ return false;
+ }
+
+ /*
+     * Check that it *isn't* the event channel GSI, and thus
+ * that we are not recursing and it's safe to take s->port_lock.
+ *
+ * Locking aside, it's perfectly sane to bail out early for that
+ * special case, as it would make no sense for the event channel
+ * GSI to be routed back to event channels, when the delivery
+ * method is to raise the GSI... that recursion wouldn't *just*
+ * be a locking issue.
+ */
+ if (gsi && gsi == s->callback_gsi) {
+ return false;
+ }
+
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ pirq = s->gsi_pirq[gsi];
+ if (!pirq) {
+ return false;
+ }
+
+ if (level) {
+ int port = s->pirq[pirq].port;
+
+ s->pirq_gsi_set |= (1U << gsi);
+ if (port) {
+ set_port_pending(s, port);
+ }
+ } else {
+ s->pirq_gsi_set &= ~(1U << gsi);
+ }
+ return true;
+}
+
+static uint32_t msi_pirq_target(uint64_t addr, uint32_t data)
+{
+ /* The vector (in low 8 bits of data) must be zero */
+ if (data & 0xff) {
+ return 0;
+ }
+
+ uint32_t pirq = (addr & 0xff000) >> 12;
+ pirq |= (addr >> 32) & 0xffffff00;
+
+ return pirq;
+}
+
+static void do_remove_pci_vector(XenEvtchnState *s, PCIDevice *dev, int vector,
+ int except_pirq)
+{
+ uint32_t pirq;
+
+ for (pirq = 0; pirq < s->nr_pirqs; pirq++) {
+ /*
+ * We could be cleverer here, but it isn't really a fast path, and
+ * this trivial optimisation is enough to let us skip the big gap
+ * in the middle a bit quicker (in terms of both loop iterations,
+ * and cache lines).
+ */
+ if (!(pirq & 63) && !(pirq_inuse_word(s, pirq))) {
+            pirq += 63; /* the loop increment takes us to the next word */
+ continue;
+ }
+ if (except_pirq && pirq == except_pirq) {
+ continue;
+ }
+ if (s->pirq[pirq].dev != dev) {
+ continue;
+ }
+ if (vector != -1 && s->pirq[pirq].vector != vector) {
+ continue;
+ }
+
+ /* It could theoretically be bound to a port already, but that is OK. */
+ s->pirq[pirq].dev = dev;
+ s->pirq[pirq].gsi = IRQ_UNBOUND;
+ s->pirq[pirq].is_msix = false;
+ s->pirq[pirq].vector = 0;
+ s->pirq[pirq].is_masked = false;
+ s->pirq[pirq].is_translated = false;
+ }
+}
+
+void xen_evtchn_remove_pci_device(PCIDevice *dev)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+
+ if (!s) {
+ return;
+ }
+
+ QEMU_LOCK_GUARD(&s->port_lock);
+ do_remove_pci_vector(s, dev, -1, 0);
+}
+
+void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
+ uint64_t addr, uint32_t data, bool is_masked)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ uint32_t pirq;
+
+ if (!s) {
+ return;
+ }
+
+ assert(qemu_mutex_iothread_locked());
+
+ pirq = msi_pirq_target(addr, data);
+
+ /*
+ * The PIRQ# must be sane, and there must be an allocated PIRQ in
+ * IRQ_UNBOUND or IRQ_MSI_EMU state to match it.
+ */
+ if (!pirq || pirq >= s->nr_pirqs || !pirq_inuse(s, pirq) ||
+ (s->pirq[pirq].gsi != IRQ_UNBOUND &&
+ s->pirq[pirq].gsi != IRQ_MSI_EMU)) {
+ pirq = 0;
+ }
+
+ if (pirq) {
+ s->pirq[pirq].dev = dev;
+ s->pirq[pirq].gsi = IRQ_MSI_EMU;
+ s->pirq[pirq].is_msix = is_msix;
+ s->pirq[pirq].vector = vector;
+ s->pirq[pirq].is_masked = is_masked;
+ }
+
+ /* Remove any (other) entries for this {device, vector} */
+ do_remove_pci_vector(s, dev, vector, pirq);
+}
+
+int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ uint32_t pirq, port;
+ CPUState *cpu;
+
+ if (!s) {
+ return 1; /* Not a PIRQ */
+ }
+
+ assert(qemu_mutex_iothread_locked());
+
+ pirq = msi_pirq_target(address, data);
+ if (!pirq || pirq >= s->nr_pirqs) {
+ return 1; /* Not a PIRQ */
+ }
+
+ if (!kvm_xen_has_cap(EVTCHN_2LEVEL)) {
+ return -ENOTSUP;
+ }
+
+ if (s->pirq[pirq].gsi != IRQ_MSI_EMU) {
+ return -EINVAL;
+ }
+
+ /* Remember that KVM tried to translate this. It might need to try again. */
+ s->pirq[pirq].is_translated = true;
+
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ port = s->pirq[pirq].port;
+ if (!valid_port(port)) {
+ return -EINVAL;
+ }
+
+ cpu = qemu_get_cpu(s->port_table[port].vcpu);
+ if (!cpu) {
+ return -EINVAL;
+ }
+
+ route->type = KVM_IRQ_ROUTING_XEN_EVTCHN;
+ route->u.xen_evtchn.port = port;
+ route->u.xen_evtchn.vcpu = kvm_arch_vcpu_id(cpu);
+ route->u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+
+ return 0; /* Handled */
+}
+
+bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ uint32_t pirq, port;
+
+ if (!s) {
+ return false;
+ }
+
+ assert(qemu_mutex_iothread_locked());
+
+ pirq = msi_pirq_target(address, data);
+ if (!pirq || pirq >= s->nr_pirqs) {
+ return false;
+ }
+
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ port = s->pirq[pirq].port;
+ if (!valid_port(port)) {
+ return false;
+ }
+
+ set_port_pending(s, port);
+ return true;
+}
+
+int xen_physdev_map_pirq(struct physdev_map_pirq *map)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int pirq = map->pirq;
+ int gsi = map->index;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ QEMU_IOTHREAD_LOCK_GUARD();
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ if (map->domid != DOMID_SELF && map->domid != xen_domid) {
+ return -EPERM;
+ }
+ if (map->type != MAP_PIRQ_TYPE_GSI) {
+ return -EINVAL;
+ }
+ if (gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
+ return -EINVAL;
+ }
+
+ if (pirq < 0) {
+ pirq = allocate_pirq(s, map->type, gsi);
+ if (pirq < 0) {
+ return pirq;
+ }
+ map->pirq = pirq;
+    } else if (pirq >= s->nr_pirqs) {
+ return -EINVAL;
+ } else {
+ /*
+ * User specified a valid-looking PIRQ#. Allow it if it is
+ * allocated and not yet bound, or if it is unallocated
+ */
+ if (pirq_inuse(s, pirq)) {
+ if (s->pirq[pirq].gsi != IRQ_UNBOUND) {
+ return -EBUSY;
+ }
+ } else {
+ /* If it was unused, mark it used now. */
+ pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
+ }
+ /* Set the mapping in both directions. */
+ s->pirq[pirq].gsi = gsi;
+ s->gsi_pirq[gsi] = pirq;
+ }
+
+ trace_kvm_xen_map_pirq(pirq, gsi);
+ return 0;
+}
+
+int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int pirq = unmap->pirq;
+ int gsi;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (unmap->domid != DOMID_SELF && unmap->domid != xen_domid) {
+ return -EPERM;
+ }
+ if (pirq < 0 || pirq >= s->nr_pirqs) {
+ return -EINVAL;
+ }
+
+ QEMU_IOTHREAD_LOCK_GUARD();
+ qemu_mutex_lock(&s->port_lock);
+
+ if (!pirq_inuse(s, pirq)) {
+ qemu_mutex_unlock(&s->port_lock);
+ return -ENOENT;
+ }
+
+ gsi = s->pirq[pirq].gsi;
+
+ /* We can only unmap GSI PIRQs */
+ if (gsi < 0) {
+ qemu_mutex_unlock(&s->port_lock);
+ return -EINVAL;
+ }
+
+ s->gsi_pirq[gsi] = 0;
+ s->pirq[pirq].gsi = IRQ_UNBOUND; /* Doesn't actually matter because: */
+ pirq_inuse_word(s, pirq) &= ~pirq_inuse_bit(pirq);
+
+ trace_kvm_xen_unmap_pirq(pirq, gsi);
+ qemu_mutex_unlock(&s->port_lock);
+
+ if (gsi == IRQ_MSI_EMU) {
+ kvm_update_msi_routes_all(NULL, true, 0, 0);
+ }
+
+ return 0;
+}
+
+int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int pirq = eoi->irq;
+ int gsi;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ QEMU_IOTHREAD_LOCK_GUARD();
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ if (!pirq_inuse(s, pirq)) {
+ return -ENOENT;
+ }
+
+ gsi = s->pirq[pirq].gsi;
+ if (gsi < 0) {
+ return -EINVAL;
+ }
+
+ /* Reassert a level IRQ if needed */
+ if (s->pirq_gsi_set & (1U << gsi)) {
+ int port = s->pirq[pirq].port;
+ if (port) {
+ set_port_pending(s, port);
+ }
+ }
+
+ return 0;
+}
+
+int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int pirq = query->irq;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ QEMU_IOTHREAD_LOCK_GUARD();
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ if (!pirq_inuse(s, pirq)) {
+ return -ENOENT;
+ }
+
+ if (s->pirq[pirq].gsi >= 0) {
+ query->flags = XENIRQSTAT_needs_eoi;
+ } else {
+ query->flags = 0;
+ }
+
+ return 0;
+}
+
+int xen_physdev_get_free_pirq(struct physdev_get_free_pirq *get)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int pirq;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ pirq = allocate_pirq(s, get->type, IRQ_UNBOUND);
+ if (pirq < 0) {
+ return pirq;
+ }
+
+ get->pirq = pirq;
+ trace_kvm_xen_get_free_pirq(pirq, get->type);
+ return 0;
+}
+
+struct xenevtchn_handle *xen_be_evtchn_open(void)
+{
+ struct xenevtchn_handle *xc = g_new0(struct xenevtchn_handle, 1);
+
+ xc->fd = eventfd(0, EFD_CLOEXEC);
+ if (xc->fd < 0) {
+        g_free(xc);
+ return NULL;
+ }
+
+ return xc;
+}
+
+static int find_be_port(XenEvtchnState *s, struct xenevtchn_handle *xc)
+{
+ int i;
+
+ for (i = 1; i < EVTCHN_2L_NR_CHANNELS; i++) {
+ if (!s->be_handles[i]) {
+ s->be_handles[i] = xc;
+ xc->be_port = i;
+ return i;
+ }
+ }
+ return 0;
+}
+
+int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
+ evtchn_port_t guest_port)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ XenEvtchnPort *gp;
+ uint16_t be_port = 0;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!xc) {
+ return -EFAULT;
+ }
+
+ if (domid != xen_domid) {
+ return -ESRCH;
+ }
+
+ if (!valid_port(guest_port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ /* The guest has to have an unbound port waiting for us to bind */
+ gp = &s->port_table[guest_port];
+
+ switch (gp->type) {
+ case EVTCHNSTAT_interdomain:
+ /* Allow rebinding after migration, preserve port # if possible */
+ be_port = gp->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU;
+ assert(be_port != 0);
+ if (!s->be_handles[be_port]) {
+ s->be_handles[be_port] = xc;
+ xc->guest_port = guest_port;
+ ret = xc->be_port = be_port;
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ assign_kernel_eventfd(gp->type, guest_port, xc->fd);
+ }
+ break;
+ }
+ /* fall through */
+
+ case EVTCHNSTAT_unbound:
+ be_port = find_be_port(s, xc);
+ if (!be_port) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ gp->type = EVTCHNSTAT_interdomain;
+ gp->type_val = be_port | PORT_INFO_TYPEVAL_REMOTE_QEMU;
+ xc->guest_port = guest_port;
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ assign_kernel_eventfd(gp->type, guest_port, xc->fd);
+ }
+ ret = be_port;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ out:
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!xc) {
+ return -EFAULT;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ if (port && port != xc->be_port) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (xc->guest_port) {
+ XenEvtchnPort *gp = &s->port_table[xc->guest_port];
+
+ /* This should never *not* be true */
+ if (gp->type == EVTCHNSTAT_interdomain) {
+ gp->type = EVTCHNSTAT_unbound;
+ gp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU;
+ }
+
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ deassign_kernel_port(xc->guest_port);
+ }
+ xc->guest_port = 0;
+ }
+
+ s->be_handles[xc->be_port] = NULL;
+ xc->be_port = 0;
+ ret = 0;
+ out:
+ qemu_mutex_unlock(&s->port_lock);
+ return ret;
+}
+
+int xen_be_evtchn_close(struct xenevtchn_handle *xc)
+{
+ if (!xc) {
+ return -EFAULT;
+ }
+
+ xen_be_evtchn_unbind(xc, 0);
+
+ close(xc->fd);
+    g_free(xc);
+ return 0;
+}
+
+int xen_be_evtchn_fd(struct xenevtchn_handle *xc)
+{
+ if (!xc) {
+ return -1;
+ }
+ return xc->fd;
+}
+
+int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!xc) {
+ return -EFAULT;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ if (xc->guest_port) {
+ set_port_pending(s, xc->guest_port);
+ ret = 0;
+ } else {
+ ret = -ENOTCONN;
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}
+
+int xen_be_evtchn_pending(struct xenevtchn_handle *xc)
+{
+ uint64_t val;
+
+ if (!xc) {
+ return -EFAULT;
+ }
+
+ if (!xc->be_port) {
+ return 0;
+ }
+
+ if (eventfd_read(xc->fd, &val)) {
+ return -errno;
+ }
+
+ return val ? xc->be_port : 0;
+}
+
+int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port)
+{
+ if (!xc) {
+ return -EFAULT;
+ }
+
+ if (xc->be_port != port) {
+ return -EINVAL;
+ }
+
+ /*
+ * We don't actually do anything to unmask it; the event was already
+ * consumed in xen_be_evtchn_pending().
+ */
+ return 0;
+}
+
+int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc)
+{
+ return xc->guest_port;
+}
+
+EvtchnInfoList *qmp_xen_event_list(Error **errp)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ EvtchnInfoList *head = NULL, **tail = &head;
+ void *shinfo, *pending, *mask;
+ int i;
+
+ if (!s) {
+ error_setg(errp, "Xen event channel emulation not enabled");
+ return NULL;
+ }
+
+ shinfo = xen_overlay_get_shinfo_ptr();
+ if (!shinfo) {
+ error_setg(errp, "Xen shared info page not allocated");
+ return NULL;
+ }
+
+ if (xen_is_long_mode()) {
+ pending = shinfo + offsetof(struct shared_info, evtchn_pending);
+ mask = shinfo + offsetof(struct shared_info, evtchn_mask);
+ } else {
+ pending = shinfo + offsetof(struct compat_shared_info, evtchn_pending);
+ mask = shinfo + offsetof(struct compat_shared_info, evtchn_mask);
+ }
+
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ for (i = 0; i < s->nr_ports; i++) {
+ XenEvtchnPort *p = &s->port_table[i];
+ EvtchnInfo *info;
+
+ if (p->type == EVTCHNSTAT_closed) {
+ continue;
+ }
+
+ info = g_new0(EvtchnInfo, 1);
+
+ info->port = i;
+ qemu_build_assert(EVTCHN_PORT_TYPE_CLOSED == EVTCHNSTAT_closed);
+ qemu_build_assert(EVTCHN_PORT_TYPE_UNBOUND == EVTCHNSTAT_unbound);
+ qemu_build_assert(EVTCHN_PORT_TYPE_INTERDOMAIN == EVTCHNSTAT_interdomain);
+ qemu_build_assert(EVTCHN_PORT_TYPE_PIRQ == EVTCHNSTAT_pirq);
+ qemu_build_assert(EVTCHN_PORT_TYPE_VIRQ == EVTCHNSTAT_virq);
+ qemu_build_assert(EVTCHN_PORT_TYPE_IPI == EVTCHNSTAT_ipi);
+
+ info->type = p->type;
+ if (p->type == EVTCHNSTAT_interdomain) {
+ info->remote_domain = g_strdup((p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) ?
+ "qemu" : "loopback");
+ info->target = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+ } else {
+ info->target = p->type_val;
+ }
+ info->vcpu = p->vcpu;
+ info->pending = test_bit(i, pending);
+ info->masked = test_bit(i, mask);
+
+ QAPI_LIST_APPEND(tail, info);
+ }
+
+ return head;
+}
+
+void qmp_xen_event_inject(uint32_t port, Error **errp)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+
+ if (!s) {
+ error_setg(errp, "Xen event channel emulation not enabled");
+ return;
+ }
+
+    if (!valid_port(port)) {
+        error_setg(errp, "Invalid port %u", port);
+        return;
+    }
+
+ QEMU_LOCK_GUARD(&s->port_lock);
+
+ if (set_port_pending(s, port)) {
+ error_setg(errp, "Failed to set port %u", port);
+ return;
+ }
+}
+
+void hmp_xen_event_list(Monitor *mon, const QDict *qdict)
+{
+ EvtchnInfoList *iter, *info_list;
+ Error *err = NULL;
+
+ info_list = qmp_xen_event_list(&err);
+ if (err) {
+ hmp_handle_error(mon, err);
+ return;
+ }
+
+ for (iter = info_list; iter; iter = iter->next) {
+ EvtchnInfo *info = iter->value;
+
+ monitor_printf(mon, "port %4u: vcpu: %d %s", info->port, info->vcpu,
+ EvtchnPortType_str(info->type));
+ if (info->type != EVTCHN_PORT_TYPE_IPI) {
+ monitor_printf(mon, "(");
+ if (info->remote_domain) {
+ monitor_printf(mon, "%s:", info->remote_domain);
+ }
+ monitor_printf(mon, "%d)", info->target);
+ }
+ if (info->pending) {
+ monitor_printf(mon, " PENDING");
+ }
+ if (info->masked) {
+ monitor_printf(mon, " MASKED");
+ }
+ monitor_printf(mon, "\n");
+ }
+
+ qapi_free_EvtchnInfoList(info_list);
+}
+
+void hmp_xen_event_inject(Monitor *mon, const QDict *qdict)
+{
+ int port = qdict_get_int(qdict, "port");
+ Error *err = NULL;
+
+ qmp_xen_event_inject(port, &err);
+ if (err) {
+ hmp_handle_error(mon, err);
+ } else {
+ monitor_printf(mon, "Delivered port %d\n", port);
+ }
+}
+
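The xen_be_evtchn_* functions above give QEMU-internal backends the same shape of API as libxenevtchn, so an emulated backend (such as the xenstore device later in this patch) can drive a guest event channel much as an external daemon would through the real library. The following is only an illustrative sketch of that lifecycle using the functions defined in this file; the wrapper name and the assumption that the guest has already allocated an unbound port with remote_dom == DOMID_QEMU are mine, not part of the patch.

/* Hedged sketch: backend side of an interdomain event channel. */
static void example_backend_roundtrip(evtchn_port_t guest_port)
{
    struct xenevtchn_handle *xc = xen_be_evtchn_open();
    int be_port;

    if (!xc) {
        return;
    }

    /* Bind our end to the guest's unbound port; returns the local be_port. */
    be_port = xen_be_evtchn_bind_interdomain(xc, xen_domid, guest_port);
    if (be_port < 0) {
        xen_be_evtchn_close(xc);
        return;
    }

    /* Raise the event on the guest side. */
    xen_be_evtchn_notify(xc, be_port);

    /* Later, consume an event the guest sent to us via EVTCHNOP_send. */
    if (xen_be_evtchn_pending(xc) == be_port) {
        xen_be_evtchn_unmask(xc, be_port);  /* a no-op here, but keeps the API shape */
    }

    xen_be_evtchn_unbind(xc, be_port);
    xen_be_evtchn_close(xc);
}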
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
new file mode 100644
index 0000000000..bfb67ac2bc
--- /dev/null
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -0,0 +1,88 @@
+/*
+ * QEMU Xen emulation: Event channel support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_EVTCHN_H
+#define QEMU_XEN_EVTCHN_H
+
+#include "hw/sysbus.h"
+
+typedef uint32_t evtchn_port_t;
+
+void xen_evtchn_create(void);
+int xen_evtchn_soft_reset(void);
+int xen_evtchn_set_callback_param(uint64_t param);
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis);
+void xen_evtchn_set_callback_level(int level);
+
+int xen_evtchn_set_port(uint16_t port);
+
+bool xen_evtchn_set_gsi(int gsi, int level);
+void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
+ uint64_t addr, uint32_t data, bool is_masked);
+void xen_evtchn_remove_pci_device(PCIDevice *dev);
+struct kvm_irq_routing_entry;
+int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data);
+bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data);
+
+
+/*
+ * These functions mirror the libxenevtchn library API, providing the QEMU
+ * backend side of "interdomain" event channels.
+ */
+struct xenevtchn_handle;
+struct xenevtchn_handle *xen_be_evtchn_open(void);
+int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
+ evtchn_port_t guest_port);
+int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_close(struct xenevtchn_handle *xc);
+int xen_be_evtchn_fd(struct xenevtchn_handle *xc);
+int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port);
+int xen_be_evtchn_pending(struct xenevtchn_handle *xc);
+/* Apart from this which is a local addition */
+int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc);
+
+struct evtchn_status;
+struct evtchn_close;
+struct evtchn_unmask;
+struct evtchn_bind_virq;
+struct evtchn_bind_pirq;
+struct evtchn_bind_ipi;
+struct evtchn_send;
+struct evtchn_alloc_unbound;
+struct evtchn_bind_interdomain;
+struct evtchn_bind_vcpu;
+struct evtchn_reset;
+int xen_evtchn_status_op(struct evtchn_status *status);
+int xen_evtchn_close_op(struct evtchn_close *close);
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
+int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq);
+int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq);
+int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi);
+int xen_evtchn_send_op(struct evtchn_send *send);
+int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc);
+int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain);
+int xen_evtchn_bind_vcpu_op(struct evtchn_bind_vcpu *vcpu);
+int xen_evtchn_reset_op(struct evtchn_reset *reset);
+
+struct physdev_map_pirq;
+struct physdev_unmap_pirq;
+struct physdev_eoi;
+struct physdev_irq_status_query;
+struct physdev_get_free_pirq;
+int xen_physdev_map_pirq(struct physdev_map_pirq *map);
+int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap);
+int xen_physdev_eoi_pirq(struct physdev_eoi *eoi);
+int xen_physdev_query_pirq(struct physdev_irq_status_query *query);
+int xen_physdev_get_free_pirq(struct physdev_get_free_pirq *get);
+
+#endif /* QEMU_XEN_EVTCHN_H */
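One detail worth spelling out for the PIRQ routing declarations above: the translation code recovers the PIRQ number from the MSI address/data pair itself (see msi_pirq_target() in xen_evtchn.c), requiring a zero vector byte and packing the PIRQ into address bits 12-19 plus, for 64-bit addresses, bits 32 and up. A self-contained sketch of that decode with one worked example; the values are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the decode in msi_pirq_target(), for illustration only. */
static uint32_t example_msi_pirq_target(uint64_t addr, uint32_t data)
{
    if (data & 0xff) {
        return 0;                              /* vector byte must be zero */
    }
    uint32_t pirq = (addr & 0xff000) >> 12;    /* low 8 bits of the PIRQ */
    pirq |= (addr >> 32) & 0xffffff00;         /* high bits from a 64-bit address */
    return pirq;
}

int main(void)
{
    /* Address 0xfee2a000 with data 0 decodes to PIRQ 0x2a. */
    printf("pirq = 0x%x\n", example_msi_pirq_target(0xfee2a000ULL, 0));
    return 0;
}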
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
new file mode 100644
index 0000000000..1e691ded32
--- /dev/null
+++ b/hw/i386/kvm/xen_gnttab.c
@@ -0,0 +1,232 @@
+/*
+ * QEMU Xen emulation: Grant table support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+#include "exec/target_page.h"
+#include "exec/address-spaces.h"
+#include "migration/vmstate.h"
+
+#include "hw/sysbus.h"
+#include "hw/xen/xen.h"
+#include "xen_overlay.h"
+#include "xen_gnttab.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+
+#include "hw/xen/interface/memory.h"
+#include "hw/xen/interface/grant_table.h"
+
+#define TYPE_XEN_GNTTAB "xen-gnttab"
+OBJECT_DECLARE_SIMPLE_TYPE(XenGnttabState, XEN_GNTTAB)
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
+
+#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
+
+struct XenGnttabState {
+ /*< private >*/
+ SysBusDevice busdev;
+ /*< public >*/
+
+ QemuMutex gnt_lock;
+
+ uint32_t nr_frames;
+ uint32_t max_frames;
+
+ union {
+ grant_entry_v1_t *v1;
+ /* Theoretically, v2 support could be added here. */
+ } entries;
+
+ MemoryRegion gnt_frames;
+ MemoryRegion *gnt_aliases;
+ uint64_t *gnt_frame_gpas;
+};
+
+struct XenGnttabState *xen_gnttab_singleton;
+
+static void xen_gnttab_realize(DeviceState *dev, Error **errp)
+{
+ XenGnttabState *s = XEN_GNTTAB(dev);
+ int i;
+
+ if (xen_mode != XEN_EMULATE) {
+ error_setg(errp, "Xen grant table support is for Xen emulation");
+ return;
+ }
+ s->nr_frames = 0;
+ s->max_frames = kvm_xen_get_gnttab_max_frames();
+ memory_region_init_ram(&s->gnt_frames, OBJECT(dev), "xen:grant_table",
+ XEN_PAGE_SIZE * s->max_frames, &error_abort);
+ memory_region_set_enabled(&s->gnt_frames, true);
+ s->entries.v1 = memory_region_get_ram_ptr(&s->gnt_frames);
+ memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames);
+
+    /* Create individual page-sized aliases for overlays */
+    s->gnt_aliases = g_new0(MemoryRegion, s->max_frames);
+    s->gnt_frame_gpas = g_new(uint64_t, s->max_frames);
+ for (i = 0; i < s->max_frames; i++) {
+ memory_region_init_alias(&s->gnt_aliases[i], OBJECT(dev),
+ NULL, &s->gnt_frames,
+ i * XEN_PAGE_SIZE, XEN_PAGE_SIZE);
+ s->gnt_frame_gpas[i] = INVALID_GPA;
+ }
+
+ qemu_mutex_init(&s->gnt_lock);
+
+ xen_gnttab_singleton = s;
+}
+
+static int xen_gnttab_post_load(void *opaque, int version_id)
+{
+ XenGnttabState *s = XEN_GNTTAB(opaque);
+ uint32_t i;
+
+ for (i = 0; i < s->nr_frames; i++) {
+ if (s->gnt_frame_gpas[i] != INVALID_GPA) {
+ xen_overlay_do_map_page(&s->gnt_aliases[i], s->gnt_frame_gpas[i]);
+ }
+ }
+ return 0;
+}
+
+static bool xen_gnttab_is_needed(void *opaque)
+{
+ return xen_mode == XEN_EMULATE;
+}
+
+static const VMStateDescription xen_gnttab_vmstate = {
+ .name = "xen_gnttab",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xen_gnttab_is_needed,
+ .post_load = xen_gnttab_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(nr_frames, XenGnttabState),
+ VMSTATE_VARRAY_UINT32(gnt_frame_gpas, XenGnttabState, nr_frames, 0,
+ vmstate_info_uint64, uint64_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void xen_gnttab_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = xen_gnttab_realize;
+ dc->vmsd = &xen_gnttab_vmstate;
+}
+
+static const TypeInfo xen_gnttab_info = {
+ .name = TYPE_XEN_GNTTAB,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XenGnttabState),
+ .class_init = xen_gnttab_class_init,
+};
+
+void xen_gnttab_create(void)
+{
+ xen_gnttab_singleton = XEN_GNTTAB(sysbus_create_simple(TYPE_XEN_GNTTAB,
+ -1, NULL));
+}
+
+static void xen_gnttab_register_types(void)
+{
+ type_register_static(&xen_gnttab_info);
+}
+
+type_init(xen_gnttab_register_types)
+
+int xen_gnttab_map_page(uint64_t idx, uint64_t gfn)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+ uint64_t gpa = gfn << XEN_PAGE_SHIFT;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (idx >= s->max_frames) {
+ return -EINVAL;
+ }
+
+ QEMU_IOTHREAD_LOCK_GUARD();
+ QEMU_LOCK_GUARD(&s->gnt_lock);
+
+ xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa);
+
+ s->gnt_frame_gpas[idx] = gpa;
+
+ if (s->nr_frames <= idx) {
+ s->nr_frames = idx + 1;
+ }
+
+ return 0;
+}
+
+int xen_gnttab_set_version_op(struct gnttab_set_version *set)
+{
+ int ret;
+
+ switch (set->version) {
+ case 1:
+ ret = 0;
+ break;
+
+ case 2:
+ /* Behave as before set_version was introduced. */
+ ret = -ENOSYS;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ set->version = 1;
+ return ret;
+}
+
+int xen_gnttab_get_version_op(struct gnttab_get_version *get)
+{
+ if (get->dom != DOMID_SELF && get->dom != xen_domid) {
+ return -ESRCH;
+ }
+
+ get->version = 1;
+ return 0;
+}
+
+int xen_gnttab_query_size_op(struct gnttab_query_size *size)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (size->dom != DOMID_SELF && size->dom != xen_domid) {
+ size->status = GNTST_bad_domain;
+ return 0;
+ }
+
+ size->status = GNTST_okay;
+ size->nr_frames = s->nr_frames;
+ size->max_nr_frames = s->max_frames;
+ return 0;
+}
diff --git a/hw/i386/kvm/xen_gnttab.h b/hw/i386/kvm/xen_gnttab.h
new file mode 100644
index 0000000000..3bdbe96191
--- /dev/null
+++ b/hw/i386/kvm/xen_gnttab.h
@@ -0,0 +1,25 @@
+/*
+ * QEMU Xen emulation: Grant table support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_GNTTAB_H
+#define QEMU_XEN_GNTTAB_H
+
+void xen_gnttab_create(void);
+int xen_gnttab_map_page(uint64_t idx, uint64_t gfn);
+
+struct gnttab_set_version;
+struct gnttab_get_version;
+struct gnttab_query_size;
+int xen_gnttab_set_version_op(struct gnttab_set_version *set);
+int xen_gnttab_get_version_op(struct gnttab_get_version *get);
+int xen_gnttab_query_size_op(struct gnttab_query_size *size);
+
+#endif /* QEMU_XEN_GNTTAB_H */
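Only grant table v1 is implemented here, so set_version accepts 1, answers 2 with -ENOSYS, and query_size reports nr_frames/max_frames for the caller's own domain. Below is a minimal sketch of how a hypercall dispatcher might route GNTTABOP_* commands to the helpers declared above; the dispatcher shape, the example_ name and the assumption that the op structs have already been copied in from guest memory are illustrative, not part of this patch.

/* Hedged sketch; assumes <errno.h> and the Xen grant_table.h definitions. */
static int example_do_gnttab_op(unsigned int cmd, void *arg)
{
    switch (cmd) {
    case GNTTABOP_set_version:
        return xen_gnttab_set_version_op(arg);   /* only version 1 sticks */
    case GNTTABOP_get_version:
        return xen_gnttab_get_version_op(arg);
    case GNTTABOP_query_size:
        return xen_gnttab_query_size_op(arg);
    default:
        return -ENOSYS;
    }
}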
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
new file mode 100644
index 0000000000..39fda1b72c
--- /dev/null
+++ b/hw/i386/kvm/xen_overlay.c
@@ -0,0 +1,272 @@
+/*
+ * QEMU Xen emulation: Shared/overlay pages support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+#include "exec/target_page.h"
+#include "exec/address-spaces.h"
+#include "migration/vmstate.h"
+
+#include "hw/sysbus.h"
+#include "hw/xen/xen.h"
+#include "xen_overlay.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+#include <linux/kvm.h>
+
+#include "hw/xen/interface/memory.h"
+
+
+#define TYPE_XEN_OVERLAY "xen-overlay"
+OBJECT_DECLARE_SIMPLE_TYPE(XenOverlayState, XEN_OVERLAY)
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
+
+struct XenOverlayState {
+ /*< private >*/
+ SysBusDevice busdev;
+ /*< public >*/
+
+ MemoryRegion shinfo_mem;
+ void *shinfo_ptr;
+ uint64_t shinfo_gpa;
+ bool long_mode;
+};
+
+struct XenOverlayState *xen_overlay_singleton;
+
+void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa)
+{
+ /*
+     * Xen allows a guest to map the same page as many times as it likes
+ * into guest physical frames. We don't, because it would be hard
+ * to track and restore them all. One mapping of each page is
+ * perfectly sufficient for all known guests... and we've tested
+ * that theory on a few now in other implementations. dwmw2.
+ */
+ if (memory_region_is_mapped(page)) {
+ if (gpa == INVALID_GPA) {
+ memory_region_del_subregion(get_system_memory(), page);
+ } else {
+ /* Just move it */
+ memory_region_set_address(page, gpa);
+ }
+ } else if (gpa != INVALID_GPA) {
+ memory_region_add_subregion_overlap(get_system_memory(), gpa, page, 0);
+ }
+}
+
+/* KVM is the only existing back end for now. Let's not overengineer it yet. */
+static int xen_overlay_set_be_shinfo(uint64_t gfn)
+{
+ struct kvm_xen_hvm_attr xa = {
+ .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+ .u.shared_info.gfn = gfn,
+ };
+
+ return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+}
+
+
+static void xen_overlay_realize(DeviceState *dev, Error **errp)
+{
+ XenOverlayState *s = XEN_OVERLAY(dev);
+
+ if (xen_mode != XEN_EMULATE) {
+ error_setg(errp, "Xen overlay page support is for Xen emulation");
+ return;
+ }
+
+ memory_region_init_ram(&s->shinfo_mem, OBJECT(dev), "xen:shared_info",
+ XEN_PAGE_SIZE, &error_abort);
+ memory_region_set_enabled(&s->shinfo_mem, true);
+
+ s->shinfo_ptr = memory_region_get_ram_ptr(&s->shinfo_mem);
+ s->shinfo_gpa = INVALID_GPA;
+ s->long_mode = false;
+ memset(s->shinfo_ptr, 0, XEN_PAGE_SIZE);
+}
+
+static int xen_overlay_pre_save(void *opaque)
+{
+ /*
+ * Fetch the kernel's idea of long_mode to avoid the race condition
+ * where the guest has set the hypercall page up in 64-bit mode but
+ * not yet made a hypercall by the time migration happens, so qemu
+ * hasn't yet noticed.
+ */
+ return xen_sync_long_mode();
+}
+
+static int xen_overlay_post_load(void *opaque, int version_id)
+{
+ XenOverlayState *s = opaque;
+
+ if (s->shinfo_gpa != INVALID_GPA) {
+ xen_overlay_do_map_page(&s->shinfo_mem, s->shinfo_gpa);
+ xen_overlay_set_be_shinfo(s->shinfo_gpa >> XEN_PAGE_SHIFT);
+ }
+ if (s->long_mode) {
+ xen_set_long_mode(true);
+ }
+
+ return 0;
+}
+
+static bool xen_overlay_is_needed(void *opaque)
+{
+ return xen_mode == XEN_EMULATE;
+}
+
+static const VMStateDescription xen_overlay_vmstate = {
+ .name = "xen_overlay",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xen_overlay_is_needed,
+ .pre_save = xen_overlay_pre_save,
+ .post_load = xen_overlay_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(shinfo_gpa, XenOverlayState),
+ VMSTATE_BOOL(long_mode, XenOverlayState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void xen_overlay_reset(DeviceState *dev)
+{
+ kvm_xen_soft_reset();
+}
+
+static void xen_overlay_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = xen_overlay_reset;
+ dc->realize = xen_overlay_realize;
+ dc->vmsd = &xen_overlay_vmstate;
+}
+
+static const TypeInfo xen_overlay_info = {
+ .name = TYPE_XEN_OVERLAY,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XenOverlayState),
+ .class_init = xen_overlay_class_init,
+};
+
+void xen_overlay_create(void)
+{
+ xen_overlay_singleton = XEN_OVERLAY(sysbus_create_simple(TYPE_XEN_OVERLAY,
+ -1, NULL));
+
+ /* If xen_domid wasn't explicitly set, at least make sure it isn't zero. */
+ if (xen_domid == DOMID_QEMU) {
+ xen_domid = 1;
+    }
+}
+
+static void xen_overlay_register_types(void)
+{
+ type_register_static(&xen_overlay_info);
+}
+
+type_init(xen_overlay_register_types)
+
+int xen_overlay_map_shinfo_page(uint64_t gpa)
+{
+ XenOverlayState *s = xen_overlay_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOENT;
+ }
+
+ assert(qemu_mutex_iothread_locked());
+
+ if (s->shinfo_gpa) {
+ /* If removing shinfo page, turn the kernel magic off first */
+ ret = xen_overlay_set_be_shinfo(INVALID_GFN);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ xen_overlay_do_map_page(&s->shinfo_mem, gpa);
+ if (gpa != INVALID_GPA) {
+ ret = xen_overlay_set_be_shinfo(gpa >> XEN_PAGE_SHIFT);
+ if (ret) {
+ return ret;
+ }
+ }
+ s->shinfo_gpa = gpa;
+
+ return 0;
+}
+
+void *xen_overlay_get_shinfo_ptr(void)
+{
+ XenOverlayState *s = xen_overlay_singleton;
+
+ if (!s) {
+ return NULL;
+ }
+
+ return s->shinfo_ptr;
+}
+
+int xen_sync_long_mode(void)
+{
+ int ret;
+ struct kvm_xen_hvm_attr xa = {
+ .type = KVM_XEN_ATTR_TYPE_LONG_MODE,
+ };
+
+ if (!xen_overlay_singleton) {
+ return -ENOENT;
+ }
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_GET_ATTR, &xa);
+ if (!ret) {
+ xen_overlay_singleton->long_mode = xa.u.long_mode;
+ }
+
+ return ret;
+}
+
+int xen_set_long_mode(bool long_mode)
+{
+ int ret;
+ struct kvm_xen_hvm_attr xa = {
+ .type = KVM_XEN_ATTR_TYPE_LONG_MODE,
+ .u.long_mode = long_mode,
+ };
+
+ if (!xen_overlay_singleton) {
+ return -ENOENT;
+ }
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
+ if (!ret) {
+ xen_overlay_singleton->long_mode = xa.u.long_mode;
+ }
+
+ return ret;
+}
+
+bool xen_is_long_mode(void)
+{
+ return xen_overlay_singleton && xen_overlay_singleton->long_mode;
+}
diff --git a/hw/i386/kvm/xen_overlay.h b/hw/i386/kvm/xen_overlay.h
new file mode 100644
index 0000000000..75ecb6b359
--- /dev/null
+++ b/hw/i386/kvm/xen_overlay.h
@@ -0,0 +1,26 @@
+/*
+ * QEMU Xen emulation: Shared/overlay pages support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_OVERLAY_H
+#define QEMU_XEN_OVERLAY_H
+
+void xen_overlay_create(void);
+
+int xen_overlay_map_shinfo_page(uint64_t gpa);
+void *xen_overlay_get_shinfo_ptr(void);
+
+int xen_sync_long_mode(void);
+int xen_set_long_mode(bool long_mode);
+bool xen_is_long_mode(void);
+
+void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa);
+
+#endif /* QEMU_XEN_OVERLAY_H */
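The overlay helpers are what the shared-info mapping path ends up calling when the guest asks (via XENMEM_add_to_physmap with XENMAPSPACE_shared_info) to place the page at a given guest frame. A hedged sketch of that path follows; the wrapper name is invented, the real hypercall plumbing is not part of this file, and struct shared_info comes from the Xen interface headers.

/* Illustrative only: map the shared info page at guest frame 'gfn'. */
static int example_map_shared_info(uint64_t gfn)
{
    int err = xen_overlay_map_shinfo_page(gfn << 12);   /* XEN_PAGE_SHIFT == 12 */
    if (err) {
        return err;
    }

    /* The overlay RAM now backs that GPA; the host pointer stays valid. */
    struct shared_info *shinfo = xen_overlay_get_shinfo_ptr();
    (void)shinfo;

    /* e.g. pick up the vCPU mode the kernel has already observed. */
    return xen_sync_long_mode();
}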
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
new file mode 100644
index 0000000000..14193ef3f9
--- /dev/null
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -0,0 +1,500 @@
+/*
+ * QEMU Xen emulation: Xenstore emulation
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "qemu/main-loop.h"
+#include "qemu/cutils.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+#include "migration/vmstate.h"
+
+#include "hw/sysbus.h"
+#include "hw/xen/xen.h"
+#include "xen_overlay.h"
+#include "xen_evtchn.h"
+#include "xen_xenstore.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_xen.h"
+
+#include "hw/xen/interface/io/xs_wire.h"
+#include "hw/xen/interface/event_channel.h"
+
+#define TYPE_XEN_XENSTORE "xen-xenstore"
+OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
+
+#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
+#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))
+
+#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))
+
+struct XenXenstoreState {
+ /*< private >*/
+ SysBusDevice busdev;
+ /*< public >*/
+
+ MemoryRegion xenstore_page;
+ struct xenstore_domain_interface *xs;
+ uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
+ uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
+ uint32_t req_offset;
+ uint32_t rsp_offset;
+ bool rsp_pending;
+ bool fatal_error;
+
+ evtchn_port_t guest_port;
+ evtchn_port_t be_port;
+ struct xenevtchn_handle *eh;
+};
+
+struct XenXenstoreState *xen_xenstore_singleton;
+
+static void xen_xenstore_event(void *opaque);
+
+static void xen_xenstore_realize(DeviceState *dev, Error **errp)
+{
+ XenXenstoreState *s = XEN_XENSTORE(dev);
+
+ if (xen_mode != XEN_EMULATE) {
+ error_setg(errp, "Xen xenstore support is for Xen emulation");
+ return;
+ }
+ memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
+ XEN_PAGE_SIZE, &error_abort);
+ memory_region_set_enabled(&s->xenstore_page, true);
+ s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
+ memset(s->xs, 0, XEN_PAGE_SIZE);
+
+ /* We can't map it this early as KVM isn't ready */
+ xen_xenstore_singleton = s;
+
+ s->eh = xen_be_evtchn_open();
+ if (!s->eh) {
+ error_setg(errp, "Xenstore evtchn port init failed");
+ return;
+ }
+ aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), true,
+ xen_xenstore_event, NULL, NULL, NULL, s);
+}
+
+static bool xen_xenstore_is_needed(void *opaque)
+{
+ return xen_mode == XEN_EMULATE;
+}
+
+static int xen_xenstore_pre_save(void *opaque)
+{
+ XenXenstoreState *s = opaque;
+
+ if (s->eh) {
+ s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
+ }
+ return 0;
+}
+
+static int xen_xenstore_post_load(void *opaque, int ver)
+{
+ XenXenstoreState *s = opaque;
+
+ /*
+ * As qemu/dom0, rebind to the guest's port. The Windows drivers may
+ * unbind the XenStore evtchn and rebind to it, having obtained the
+ * "remote" port through EVTCHNOP_status. In the case that migration
+ * occurs while it's unbound, the "remote" port needs to be the same
+ * as before so that the guest can find it, but should remain unbound.
+ */
+ if (s->guest_port) {
+ int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
+ s->guest_port);
+ if (be_port < 0) {
+ return be_port;
+ }
+ s->be_port = be_port;
+ }
+ return 0;
+}
+
+static const VMStateDescription xen_xenstore_vmstate = {
+ .name = "xen_xenstore",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xen_xenstore_is_needed,
+ .pre_save = xen_xenstore_pre_save,
+ .post_load = xen_xenstore_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
+ sizeof_field(XenXenstoreState, req_data)),
+ VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
+ sizeof_field(XenXenstoreState, rsp_data)),
+ VMSTATE_UINT32(req_offset, XenXenstoreState),
+ VMSTATE_UINT32(rsp_offset, XenXenstoreState),
+ VMSTATE_BOOL(rsp_pending, XenXenstoreState),
+ VMSTATE_UINT32(guest_port, XenXenstoreState),
+ VMSTATE_BOOL(fatal_error, XenXenstoreState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void xen_xenstore_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = xen_xenstore_realize;
+ dc->vmsd = &xen_xenstore_vmstate;
+}
+
+static const TypeInfo xen_xenstore_info = {
+ .name = TYPE_XEN_XENSTORE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XenXenstoreState),
+ .class_init = xen_xenstore_class_init,
+};
+
+void xen_xenstore_create(void)
+{
+ DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);
+
+ xen_xenstore_singleton = XEN_XENSTORE(dev);
+
+ /*
+ * Defer the init (xen_xenstore_reset()) until KVM is set up and the
+ * overlay page can be mapped.
+ */
+}
+
+static void xen_xenstore_register_types(void)
+{
+ type_register_static(&xen_xenstore_info);
+}
+
+type_init(xen_xenstore_register_types)
+
+uint16_t xen_xenstore_get_port(void)
+{
+ XenXenstoreState *s = xen_xenstore_singleton;
+ if (!s) {
+ return 0;
+ }
+ return s->guest_port;
+}
+
+static bool req_pending(XenXenstoreState *s)
+{
+ struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+ return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
+}
+
+static void reset_req(XenXenstoreState *s)
+{
+ memset(s->req_data, 0, sizeof(s->req_data));
+ s->req_offset = 0;
+}
+
+static void reset_rsp(XenXenstoreState *s)
+{
+ s->rsp_pending = false;
+
+ memset(s->rsp_data, 0, sizeof(s->rsp_data));
+ s->rsp_offset = 0;
+}
+
+static void process_req(XenXenstoreState *s)
+{
+ struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ const char enosys[] = "ENOSYS";
+
+ assert(req_pending(s));
+ assert(!s->rsp_pending);
+
+ rsp->type = XS_ERROR;
+ rsp->req_id = req->req_id;
+ rsp->tx_id = req->tx_id;
+ rsp->len = sizeof(enosys);
+ memcpy((void *)&rsp[1], enosys, sizeof(enosys));
+
+ s->rsp_pending = true;
+ reset_req(s);
+}
+
+static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
+ unsigned int len)
+{
+ if (!len) {
+ return 0;
+ }
+
+ XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
+ XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
+ unsigned int copied = 0;
+
+ /* Ensure the ring contents don't cross the req_prod access. */
+ smp_rmb();
+
+ while (len) {
+ unsigned int avail = prod - cons;
+ unsigned int offset = MASK_XENSTORE_IDX(cons);
+ unsigned int copylen = avail;
+
+ if (avail > XENSTORE_RING_SIZE) {
+ error_report("XenStore ring handling error");
+ s->fatal_error = true;
+ break;
+ } else if (avail == 0) {
+ break;
+ }
+
+ if (copylen > len) {
+ copylen = len;
+ }
+ if (copylen > XENSTORE_RING_SIZE - offset) {
+ copylen = XENSTORE_RING_SIZE - offset;
+ }
+
+ memcpy(ptr, &s->xs->req[offset], copylen);
+ copied += copylen;
+
+ ptr += copylen;
+ len -= copylen;
+
+ cons += copylen;
+ }
+
+ /*
+ * Not sure this ever mattered except on Alpha, but this barrier
+ * is to ensure that the update to req_cons is globally visible
+ * only after we have consumed all the data from the ring, and we
+ * don't end up seeing data written to the ring *after* the other
+ * end sees the update and writes more to the ring. Xen's own
+ * xenstored has the same barrier here (although with no comment
+ * at all, obviously, because it's Xen code).
+ */
+ smp_mb();
+
+ qatomic_set(&s->xs->req_cons, cons);
+
+ return copied;
+}
+
+static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
+ unsigned int len)
+{
+ if (!len) {
+ return 0;
+ }
+
+ XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
+ XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
+ unsigned int copied = 0;
+
+ /*
+     * This matches the barrier in copy_from_ring() (or the guest's
+     * equivalent) between reading the data from the ring and updating
+     * rsp_cons. It protects against the pathological case (which
+ * again I think never happened except on Alpha) where our
+ * subsequent writes to the ring could *cross* the read of
+ * rsp_cons and the guest could see the new data when it was
+ * intending to read the old.
+ */
+ smp_mb();
+
+ while (len) {
+ unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
+ unsigned int offset = MASK_XENSTORE_IDX(prod);
+ unsigned int copylen = len;
+
+ if (avail > XENSTORE_RING_SIZE) {
+ error_report("XenStore ring handling error");
+ s->fatal_error = true;
+ break;
+ } else if (avail == 0) {
+ break;
+ }
+
+ if (copylen > avail) {
+ copylen = avail;
+ }
+ if (copylen > XENSTORE_RING_SIZE - offset) {
+ copylen = XENSTORE_RING_SIZE - offset;
+ }
+
+ memcpy(&s->xs->rsp[offset], ptr, copylen);
+ copied += copylen;
+
+ ptr += copylen;
+ len -= copylen;
+
+ prod += copylen;
+ }
+
+ /* Ensure the ring contents are seen before rsp_prod update. */
+ smp_wmb();
+
+ qatomic_set(&s->xs->rsp_prod, prod);
+
+ return copied;
+}
+
+static unsigned int get_req(XenXenstoreState *s)
+{
+ unsigned int copied = 0;
+
+ if (s->fatal_error) {
+ return 0;
+ }
+
+ assert(!req_pending(s));
+
+ if (s->req_offset < XENSTORE_HEADER_SIZE) {
+ void *ptr = s->req_data + s->req_offset;
+ unsigned int len = XENSTORE_HEADER_SIZE;
+ unsigned int copylen = copy_from_ring(s, ptr, len);
+
+ copied += copylen;
+ s->req_offset += copylen;
+ }
+
+ if (s->req_offset >= XENSTORE_HEADER_SIZE) {
+ struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+ if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
+ error_report("Illegal XenStore request");
+ s->fatal_error = true;
+ return 0;
+ }
+
+ void *ptr = s->req_data + s->req_offset;
+ unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
+ unsigned int copylen = copy_from_ring(s, ptr, len);
+
+ copied += copylen;
+ s->req_offset += copylen;
+ }
+
+ return copied;
+}
+
+static unsigned int put_rsp(XenXenstoreState *s)
+{
+ if (s->fatal_error) {
+ return 0;
+ }
+
+ assert(s->rsp_pending);
+
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
+
+ void *ptr = s->rsp_data + s->rsp_offset;
+ unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
+ unsigned int copylen = copy_to_ring(s, ptr, len);
+
+ s->rsp_offset += copylen;
+
+ /* Have we produced a complete response? */
+ if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
+ reset_rsp(s);
+ }
+
+ return copylen;
+}
+
+static void xen_xenstore_event(void *opaque)
+{
+ XenXenstoreState *s = opaque;
+ evtchn_port_t port = xen_be_evtchn_pending(s->eh);
+ unsigned int copied_to, copied_from;
+ bool processed, notify = false;
+
+ if (port != s->be_port) {
+ return;
+ }
+
+ /* We know this is a no-op. */
+ xen_be_evtchn_unmask(s->eh, port);
+
+ do {
+ copied_to = copied_from = 0;
+ processed = false;
+
+ if (s->rsp_pending) {
+ copied_to = put_rsp(s);
+ }
+
+ if (!req_pending(s)) {
+ copied_from = get_req(s);
+ }
+
+ if (req_pending(s) && !s->rsp_pending) {
+ process_req(s);
+ processed = true;
+ }
+
+ notify |= copied_to || copied_from;
+ } while (copied_to || copied_from || processed);
+
+ if (notify) {
+ xen_be_evtchn_notify(s->eh, s->be_port);
+ }
+}
+
+static void alloc_guest_port(XenXenstoreState *s)
+{
+ struct evtchn_alloc_unbound alloc = {
+ .dom = DOMID_SELF,
+ .remote_dom = DOMID_QEMU,
+ };
+
+ if (!xen_evtchn_alloc_unbound_op(&alloc)) {
+ s->guest_port = alloc.port;
+ }
+}
+
+int xen_xenstore_reset(void)
+{
+ XenXenstoreState *s = xen_xenstore_singleton;
+ int err;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ s->req_offset = s->rsp_offset = 0;
+ s->rsp_pending = false;
+
+ if (!memory_region_is_mapped(&s->xenstore_page)) {
+ uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS;
+ xen_overlay_do_map_page(&s->xenstore_page, gpa);
+ }
+
+ alloc_guest_port(s);
+
+ /*
+ * As qemu/dom0, bind to the guest's port. For incoming migration, this
+ * will be unbound as the guest's evtchn table is overwritten. We then
+ * rebind to the correct guest port in xen_xenstore_post_load().
+ */
+ err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port);
+ if (err < 0) {
+ return err;
+ }
+ s->be_port = err;
+
+ return 0;
+}
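The ring arithmetic in copy_from_ring()/copy_to_ring() relies on free-running 32-bit indices: availability is computed by plain subtraction and only the offset into the 1024-byte buffer is masked, so a copy that reaches the end of the buffer simply wraps on the next loop iteration. A standalone worked example of that arithmetic, with hypothetical index values and RING_SIZE matching XENSTORE_RING_SIZE from xs_wire.h:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024u
#define MASK_IDX(idx) ((idx) & (RING_SIZE - 1))

int main(void)
{
    uint32_t cons = 1020, prod = 1030;     /* hypothetical snapshot */

    uint32_t avail = prod - cons;          /* bytes ready to consume: 10 */
    uint32_t offset = MASK_IDX(cons);      /* buffer offset: 1020 */
    uint32_t chunk = RING_SIZE - offset;   /* contiguous bytes: 4, then wrap */

    printf("avail=%u offset=%u first-chunk=%u\n", avail, offset,
           avail < chunk ? avail : chunk);
    return 0;
}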
diff --git a/hw/i386/kvm/xen_xenstore.h b/hw/i386/kvm/xen_xenstore.h
new file mode 100644
index 0000000000..8c3768e075
--- /dev/null
+++ b/hw/i386/kvm/xen_xenstore.h
@@ -0,0 +1,20 @@
+/*
+ * QEMU Xen emulation: Xenstore emulation
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_XENSTORE_H
+#define QEMU_XEN_XENSTORE_H
+
+void xen_xenstore_create(void);
+int xen_xenstore_reset(void);
+
+uint16_t xen_xenstore_get_port(void);
+
+#endif /* QEMU_XEN_XENSTORE_H */
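At this point the xenstore device is only a stub: every complete request the guest puts on the ring is answered with XS_ERROR and the literal payload "ENOSYS", as process_req() shows. A sketch of the resulting wire exchange from the guest's point of view; the field values are illustrative and the guest-side plumbing (writing the ring, kicking the event channel) is omitted.

#include "hw/xen/interface/io/xs_wire.h"

/* What the guest sends (any operation at all)... */
struct xsd_sockmsg example_req = {
    .type   = XS_DIRECTORY,
    .req_id = 1,
    .tx_id  = 0,
    .len    = sizeof("/"),          /* payload: the NUL-terminated path "/" */
};

/* ...and the only reply this stub ever produces: */
struct xsd_sockmsg example_rsp = {
    .type   = XS_ERROR,
    .req_id = 1,                    /* echoed from the request */
    .tx_id  = 0,                    /* echoed from the request */
    .len    = sizeof("ENOSYS"),     /* payload: the NUL-terminated "ENOSYS" */
};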
diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c
index 29f30dd6d3..68c22016d2 100644
--- a/hw/i386/microvm.c
+++ b/hw/i386/microvm.c
@@ -57,14 +57,14 @@
#define MICROVM_QBOOT_FILENAME "qboot.rom"
#define MICROVM_BIOS_FILENAME "bios-microvm.bin"
-static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s)
+static void microvm_set_rtc(MicrovmMachineState *mms, MC146818RtcState *s)
{
X86MachineState *x86ms = X86_MACHINE(mms);
int val;
val = MIN(x86ms->below_4g_mem_size / KiB, 640);
- rtc_set_memory(s, 0x15, val);
- rtc_set_memory(s, 0x16, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x15, val);
+ mc146818rtc_set_cmos_data(s, 0x16, val >> 8);
/* extended memory (next 64MiB) */
if (x86ms->below_4g_mem_size > 1 * MiB) {
val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB;
@@ -74,10 +74,10 @@ static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s)
if (val > 65535) {
val = 65535;
}
- rtc_set_memory(s, 0x17, val);
- rtc_set_memory(s, 0x18, val >> 8);
- rtc_set_memory(s, 0x30, val);
- rtc_set_memory(s, 0x31, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x17, val);
+ mc146818rtc_set_cmos_data(s, 0x18, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x30, val);
+ mc146818rtc_set_cmos_data(s, 0x31, val >> 8);
/* memory between 16MiB and 4GiB */
if (x86ms->below_4g_mem_size > 16 * MiB) {
val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB);
@@ -87,13 +87,13 @@ static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s)
if (val > 65535) {
val = 65535;
}
- rtc_set_memory(s, 0x34, val);
- rtc_set_memory(s, 0x35, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x34, val);
+ mc146818rtc_set_cmos_data(s, 0x35, val >> 8);
/* memory above 4GiB */
val = x86ms->above_4g_mem_size / 65536;
- rtc_set_memory(s, 0x5b, val);
- rtc_set_memory(s, 0x5c, val >> 8);
- rtc_set_memory(s, 0x5d, val >> 16);
+ mc146818rtc_set_cmos_data(s, 0x5b, val);
+ mc146818rtc_set_cmos_data(s, 0x5c, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x5d, val >> 16);
}
static void create_gpex(MicrovmMachineState *mms)
@@ -161,7 +161,6 @@ static void microvm_devices_init(MicrovmMachineState *mms)
const char *default_firmware;
X86MachineState *x86ms = X86_MACHINE(mms);
ISABus *isa_bus;
- ISADevice *rtc_state;
GSIState *gsi_state;
int ioapics;
int i;
@@ -174,7 +173,7 @@ static void microvm_devices_init(MicrovmMachineState *mms)
isa_bus = isa_bus_new(NULL, get_system_memory(), get_system_io(),
&error_abort);
- isa_bus_irqs(isa_bus, x86ms->gsi);
+ isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
ioapic_init_gsi(gsi_state, "machine");
if (ioapics > 1) {
@@ -267,8 +266,7 @@ static void microvm_devices_init(MicrovmMachineState *mms)
if (mms->rtc == ON_OFF_AUTO_ON ||
(mms->rtc == ON_OFF_AUTO_AUTO && !kvm_enabled())) {
- rtc_state = mc146818_rtc_init(isa_bus, 2000, NULL);
- microvm_set_rtc(mms, rtc_state);
+ microvm_set_rtc(mms, mc146818_rtc_init(isa_bus, 2000, NULL));
}
if (mms->isa_serial) {
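The CMOS updates above all follow the same encoding: a KiB (or 64 KiB) count is clamped to 16 bits and split into a low/high byte pair, with memory above 4 GiB using a third byte. A small illustrative helper, assuming only mc146818rtc_set_cmos_data() from the new API:

/*
 * Illustrative only: shows the byte split used for 0x17/0x18, 0x30/0x31
 * and 0x34/0x35 above; not part of the patch.
 */
static void cmos_set_u16(MC146818RtcState *rtc, int lo_ofs, int hi_ofs,
                         uint16_t val)
{
    mc146818rtc_set_cmos_data(rtc, lo_ofs, val & 0xff);
    mc146818rtc_set_cmos_data(rtc, hi_ofs, (val >> 8) & 0xff);
}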
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index a7a2ededf9..fd17ce7a94 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -28,13 +28,13 @@
#include "hw/i386/pc.h"
#include "hw/char/serial.h"
#include "hw/char/parallel.h"
-#include "hw/i386/apic.h"
#include "hw/i386/topology.h"
#include "hw/i386/fw_cfg.h"
#include "hw/i386/vmport.h"
#include "sysemu/cpus.h"
#include "hw/block/fdc.h"
-#include "hw/ide.h"
+#include "hw/ide/internal.h"
+#include "hw/ide/isa.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-bridge/pci_expander_bridge.h"
@@ -47,6 +47,7 @@
#include "multiboot.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/intc/i8259.h"
+#include "hw/intc/ioapic.h"
#include "hw/timer/i8254.h"
#include "hw/input/i8042.h"
#include "hw/irq.h"
@@ -89,6 +90,10 @@
#include "hw/virtio/virtio-iommu.h"
#include "hw/virtio/virtio-pmem-pci.h"
#include "hw/virtio/virtio-mem-pci.h"
+#include "hw/i386/kvm/xen_overlay.h"
+#include "hw/i386/kvm/xen_evtchn.h"
+#include "hw/i386/kvm/xen_gnttab.h"
+#include "hw/i386/kvm/xen_xenstore.h"
#include "hw/mem/memory-device.h"
#include "sysemu/replay.h"
#include "target/i386/cpu.h"
@@ -405,7 +410,7 @@ GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled)
if (kvm_ioapic_in_kernel()) {
kvm_pc_setup_irq_routing(pci_enabled);
}
- *irqs = qemu_allocate_irqs(gsi_handler, s, GSI_NUM_PINS);
+ *irqs = qemu_allocate_irqs(gsi_handler, s, IOAPIC_NUM_PINS);
return s;
}
@@ -438,19 +443,19 @@ static uint64_t ioportF0_read(void *opaque, hwaddr addr, unsigned size)
#define REG_EQUIPMENT_BYTE 0x14
-static void cmos_init_hd(ISADevice *s, int type_ofs, int info_ofs,
+static void cmos_init_hd(MC146818RtcState *s, int type_ofs, int info_ofs,
int16_t cylinders, int8_t heads, int8_t sectors)
{
- rtc_set_memory(s, type_ofs, 47);
- rtc_set_memory(s, info_ofs, cylinders);
- rtc_set_memory(s, info_ofs + 1, cylinders >> 8);
- rtc_set_memory(s, info_ofs + 2, heads);
- rtc_set_memory(s, info_ofs + 3, 0xff);
- rtc_set_memory(s, info_ofs + 4, 0xff);
- rtc_set_memory(s, info_ofs + 5, 0xc0 | ((heads > 8) << 3));
- rtc_set_memory(s, info_ofs + 6, cylinders);
- rtc_set_memory(s, info_ofs + 7, cylinders >> 8);
- rtc_set_memory(s, info_ofs + 8, sectors);
+ mc146818rtc_set_cmos_data(s, type_ofs, 47);
+ mc146818rtc_set_cmos_data(s, info_ofs, cylinders);
+ mc146818rtc_set_cmos_data(s, info_ofs + 1, cylinders >> 8);
+ mc146818rtc_set_cmos_data(s, info_ofs + 2, heads);
+ mc146818rtc_set_cmos_data(s, info_ofs + 3, 0xff);
+ mc146818rtc_set_cmos_data(s, info_ofs + 4, 0xff);
+ mc146818rtc_set_cmos_data(s, info_ofs + 5, 0xc0 | ((heads > 8) << 3));
+ mc146818rtc_set_cmos_data(s, info_ofs + 6, cylinders);
+ mc146818rtc_set_cmos_data(s, info_ofs + 7, cylinders >> 8);
+ mc146818rtc_set_cmos_data(s, info_ofs + 8, sectors);
}
/* convert boot_device letter to something recognizable by the bios */
@@ -470,7 +475,8 @@ static int boot_device2nibble(char boot_device)
return 0;
}
-static void set_boot_dev(ISADevice *s, const char *boot_device, Error **errp)
+static void set_boot_dev(MC146818RtcState *s, const char *boot_device,
+ Error **errp)
{
#define PC_MAX_BOOT_DEVICES 3
int nbds, bds[3] = { 0, };
@@ -489,8 +495,8 @@ static void set_boot_dev(ISADevice *s, const char *boot_device, Error **errp)
return;
}
}
- rtc_set_memory(s, 0x3d, (bds[1] << 4) | bds[0]);
- rtc_set_memory(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1));
+ mc146818rtc_set_cmos_data(s, 0x3d, (bds[1] << 4) | bds[0]);
+ mc146818rtc_set_cmos_data(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1));
}
static void pc_boot_set(void *opaque, const char *boot_device, Error **errp)
@@ -498,7 +504,7 @@ static void pc_boot_set(void *opaque, const char *boot_device, Error **errp)
set_boot_dev(opaque, boot_device, errp);
}
-static void pc_cmos_init_floppy(ISADevice *rtc_state, ISADevice *floppy)
+static void pc_cmos_init_floppy(MC146818RtcState *rtc_state, ISADevice *floppy)
{
int val, nb, i;
FloppyDriveType fd_type[2] = { FLOPPY_DRIVE_TYPE_NONE,
@@ -512,9 +518,9 @@ static void pc_cmos_init_floppy(ISADevice *rtc_state, ISADevice *floppy)
}
val = (cmos_get_fd_drive_type(fd_type[0]) << 4) |
cmos_get_fd_drive_type(fd_type[1]);
- rtc_set_memory(rtc_state, 0x10, val);
+ mc146818rtc_set_cmos_data(rtc_state, 0x10, val);
- val = rtc_get_memory(rtc_state, REG_EQUIPMENT_BYTE);
+ val = mc146818rtc_get_cmos_data(rtc_state, REG_EQUIPMENT_BYTE);
nb = 0;
if (fd_type[0] != FLOPPY_DRIVE_TYPE_NONE) {
nb++;
@@ -532,11 +538,11 @@ static void pc_cmos_init_floppy(ISADevice *rtc_state, ISADevice *floppy)
val |= 0x41; /* 2 drives, ready for boot */
break;
}
- rtc_set_memory(rtc_state, REG_EQUIPMENT_BYTE, val);
+ mc146818rtc_set_cmos_data(rtc_state, REG_EQUIPMENT_BYTE, val);
}
typedef struct pc_cmos_init_late_arg {
- ISADevice *rtc_state;
+ MC146818RtcState *rtc_state;
BusState *idebus[2];
} pc_cmos_init_late_arg;
@@ -603,7 +609,7 @@ static ISADevice *pc_find_fdc0(void)
static void pc_cmos_init_late(void *opaque)
{
pc_cmos_init_late_arg *arg = opaque;
- ISADevice *s = arg->rtc_state;
+ MC146818RtcState *s = arg->rtc_state;
int16_t cylinders;
int8_t heads, sectors;
int val;
@@ -620,7 +626,7 @@ static void pc_cmos_init_late(void *opaque)
cmos_init_hd(s, 0x1a, 0x24, cylinders, heads, sectors);
val |= 0x0f;
}
- rtc_set_memory(s, 0x12, val);
+ mc146818rtc_set_cmos_data(s, 0x12, val);
val = 0;
for (i = 0; i < 4; i++) {
@@ -636,7 +642,7 @@ static void pc_cmos_init_late(void *opaque)
val |= trans << (i * 2);
}
}
- rtc_set_memory(s, 0x39, val);
+ mc146818rtc_set_cmos_data(s, 0x39, val);
pc_cmos_init_floppy(s, pc_find_fdc0());
@@ -645,19 +651,20 @@ static void pc_cmos_init_late(void *opaque)
void pc_cmos_init(PCMachineState *pcms,
BusState *idebus0, BusState *idebus1,
- ISADevice *s)
+ ISADevice *rtc)
{
int val;
static pc_cmos_init_late_arg arg;
X86MachineState *x86ms = X86_MACHINE(pcms);
+ MC146818RtcState *s = MC146818_RTC(rtc);
/* various important CMOS locations needed by PC/Bochs bios */
/* memory size */
/* base memory (first MiB) */
val = MIN(x86ms->below_4g_mem_size / KiB, 640);
- rtc_set_memory(s, 0x15, val);
- rtc_set_memory(s, 0x16, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x15, val);
+ mc146818rtc_set_cmos_data(s, 0x16, val >> 8);
/* extended memory (next 64MiB) */
if (x86ms->below_4g_mem_size > 1 * MiB) {
val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB;
@@ -666,10 +673,10 @@ void pc_cmos_init(PCMachineState *pcms,
}
if (val > 65535)
val = 65535;
- rtc_set_memory(s, 0x17, val);
- rtc_set_memory(s, 0x18, val >> 8);
- rtc_set_memory(s, 0x30, val);
- rtc_set_memory(s, 0x31, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x17, val);
+ mc146818rtc_set_cmos_data(s, 0x18, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x30, val);
+ mc146818rtc_set_cmos_data(s, 0x31, val >> 8);
/* memory between 16MiB and 4GiB */
if (x86ms->below_4g_mem_size > 16 * MiB) {
val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB);
@@ -678,13 +685,13 @@ void pc_cmos_init(PCMachineState *pcms,
}
if (val > 65535)
val = 65535;
- rtc_set_memory(s, 0x34, val);
- rtc_set_memory(s, 0x35, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x34, val);
+ mc146818rtc_set_cmos_data(s, 0x35, val >> 8);
/* memory above 4GiB */
val = x86ms->above_4g_mem_size / 65536;
- rtc_set_memory(s, 0x5b, val);
- rtc_set_memory(s, 0x5c, val >> 8);
- rtc_set_memory(s, 0x5d, val >> 16);
+ mc146818rtc_set_cmos_data(s, 0x5b, val);
+ mc146818rtc_set_cmos_data(s, 0x5c, val >> 8);
+ mc146818rtc_set_cmos_data(s, 0x5d, val >> 16);
object_property_add_link(OBJECT(pcms), "rtc_state",
TYPE_ISA_DEVICE,
@@ -699,7 +706,7 @@ void pc_cmos_init(PCMachineState *pcms,
val = 0;
val |= 0x02; /* FPU is there */
val |= 0x04; /* PS/2 mouse installed */
- rtc_set_memory(s, REG_EQUIPMENT_BYTE, val);
+ mc146818rtc_set_cmos_data(s, REG_EQUIPMENT_BYTE, val);
/* hard drives and FDC */
arg.rtc_state = s;
@@ -1296,14 +1303,23 @@ void pc_basic_device_init(struct PCMachineState *pcms,
sysbus_realize_and_unref(SYS_BUS_DEVICE(hpet), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(hpet), 0, HPET_BASE);
- for (i = 0; i < GSI_NUM_PINS; i++) {
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
sysbus_connect_irq(SYS_BUS_DEVICE(hpet), i, gsi[i]);
}
pit_isa_irq = -1;
pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT);
rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT);
}
- *rtc_state = mc146818_rtc_init(isa_bus, 2000, rtc_irq);
+ *rtc_state = ISA_DEVICE(mc146818_rtc_init(isa_bus, 2000, rtc_irq));
+
+#ifdef CONFIG_XEN_EMU
+ if (xen_mode == XEN_EMULATE) {
+ xen_evtchn_connect_gsis(gsi);
+ if (pcms->bus) {
+ pci_create_simple(pcms->bus, -1, "xen-platform");
+ }
+ }
+#endif
qemu_register_boot_set(pc_boot_set, *rtc_state);
@@ -1843,6 +1859,19 @@ static void pc_machine_initfn(Object *obj)
cxl_machine_init(obj, &pcms->cxl_devices_state);
}
+int pc_machine_kvm_type(MachineState *machine, const char *kvm_type)
+{
+#ifdef CONFIG_XEN_EMU
+ if (xen_mode == XEN_EMULATE) {
+ xen_overlay_create();
+ xen_evtchn_create();
+ xen_gnttab_create();
+ xen_xenstore_create();
+ }
+#endif
+ return 0;
+}
+
static void pc_machine_reset(MachineState *machine, ShutdownCause reason)
{
CPUState *cs;
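For reference, the boot-order bytes written by set_boot_dev() above pack one BIOS nibble per device: CMOS 0x3d holds the first two boot devices, and 0x38 holds the third plus the floppy-signature-check flag in bit 0. A hedged sketch restating just that packing with the renamed accessor:

/*
 * Sketch of the CMOS boot-order layout used by set_boot_dev(); bds[]
 * holds the nibbles returned by boot_device2nibble(). Not part of the
 * patch itself.
 */
static void encode_boot_order(MC146818RtcState *s, const int bds[3],
                              bool fd_bootchk)
{
    mc146818rtc_set_cmos_data(s, 0x3d, (bds[1] << 4) | bds[0]);
    mc146818rtc_set_cmos_data(s, 0x38,
                              (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1));
}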
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index df64dd8dcc..126b6c11df 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -39,6 +39,7 @@
#include "hw/pci/pci_ids.h"
#include "hw/usb.h"
#include "net/net.h"
+#include "hw/ide/isa.h"
#include "hw/ide/pci.h"
#include "hw/ide/piix.h"
#include "hw/irq.h"
@@ -246,7 +247,7 @@ static void pc_init1(MachineState *machine,
i8257_dma_init(isa_bus, 0);
pcms->hpet_enabled = false;
}
- isa_bus_irqs(isa_bus, x86ms->gsi);
+ isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) {
pc_i8259_create(isa_bus, gsi_state->i8259_irq);
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 66cd718b70..09004f3f1f 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -40,13 +40,14 @@
#include "hw/qdev-properties.h"
#include "hw/i386/x86.h"
#include "hw/i386/pc.h"
-#include "hw/i386/ich9.h"
#include "hw/i386/amd_iommu.h"
#include "hw/i386/intel_iommu.h"
#include "hw/display/ramfb.h"
#include "hw/firmware/smbios.h"
#include "hw/ide/pci.h"
#include "hw/ide/ahci.h"
+#include "hw/intc/ioapic.h"
+#include "hw/southbridge/ich9.h"
#include "hw/usb.h"
#include "hw/usb/hcd-uhci.h"
#include "qapi/error.h"
@@ -132,7 +133,6 @@ static void pc_q35_init(MachineState *machine)
GSIState *gsi_state;
ISABus *isa_bus;
int i;
- ICH9LPCState *ich9_lpc;
PCIDevice *ahci;
ram_addr_t lowmem;
DriveInfo *hd[MAX_SATA_PORTS];
@@ -236,9 +236,11 @@ static void pc_q35_init(MachineState *machine)
phb = PCI_HOST_BRIDGE(q35_host);
host_bus = phb->bus;
/* create ISA bus */
- lpc = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_LPC_DEV,
- ICH9_LPC_FUNC), true,
- TYPE_ICH9_LPC_DEVICE);
+ lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), true,
+ TYPE_ICH9_LPC_DEVICE);
+ qdev_prop_set_bit(DEVICE(lpc), "smm-enabled",
+ x86_machine_is_smm_enabled(x86ms));
+ pci_realize_and_unref(lpc, host_bus, &error_fatal);
object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP,
TYPE_HOTPLUG_HANDLER,
@@ -265,15 +267,11 @@ static void pc_q35_init(MachineState *machine)
/* irq lines */
gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
- ich9_lpc = ICH9_LPC_DEVICE(lpc);
lpc_dev = DEVICE(lpc);
- for (i = 0; i < GSI_NUM_PINS; i++) {
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]);
}
- pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc, ICH9_LPC_NB_PIRQS);
- pci_bus_map_irqs(host_bus, ich9_lpc_map_irq);
- pci_bus_set_route_irq_fn(host_bus, ich9_route_intx_pin_to_irq);
- isa_bus = ich9_lpc->isa_bus;
+ isa_bus = ISA_BUS(qdev_get_child_bus(lpc_dev, "isa.0"));
if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) {
pc_i8259_create(isa_bus, gsi_state->i8259_irq);
@@ -296,9 +294,6 @@ static void pc_q35_init(MachineState *machine)
pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, !mc->no_floppy,
0xff0104);
- /* connect pm stuff to lpc */
- ich9_lpc_pm_init(lpc, x86_machine_is_smm_enabled(x86ms));
-
if (pcms->sata_enabled) {
/* ahci and SATA device, for q35 1 ahci controller is built-in */
ahci = pci_create_simple_multifunction(host_bus,
@@ -320,10 +315,15 @@ static void pc_q35_init(MachineState *machine)
}
if (pcms->smbus_enabled) {
+ PCIDevice *smb;
+
/* TODO: Populate SPD eeprom data. */
- pcms->smbus = ich9_smb_init(host_bus,
- PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC),
- 0xb100);
+ smb = pci_create_simple_multifunction(host_bus,
+ PCI_DEVFN(ICH9_SMB_DEV,
+ ICH9_SMB_FUNC),
+ true, TYPE_ICH9_SMB_DEVICE);
+ pcms->smbus = I2C_BUS(qdev_get_child_bus(DEVICE(smb), "i2c"));
+
smbus_eeprom_init(pcms->smbus, 8, NULL, 0);
}
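The LPC and SMBus functions are now instantiated with the generic create/configure/realize sequence instead of the one-shot ich9 helpers, so board properties such as smm-enabled can be set before the device is realized, and the board then fishes the ISA and I2C buses out of the realized device. A minimal sketch of that pattern; "foo-device" and "some-prop" are hypothetical placeholders:

/* Sketch of the new-then-realize pattern used for the LPC and SMBus
 * functions above; "foo-device" and "some-prop" are placeholders. */
static PCIDevice *create_pci_function(PCIBus *bus, int devfn, bool prop)
{
    PCIDevice *dev = pci_new_multifunction(devfn, true, "foo-device");

    qdev_prop_set_bit(DEVICE(dev), "some-prop", prop); /* before realize */
    pci_realize_and_unref(dev, bus, &error_fatal);

    return dev;
}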
diff --git a/hw/i386/x86.c b/hw/i386/x86.c
index 48be7a1c23..a56b10b2fb 100644
--- a/hw/i386/x86.c
+++ b/hw/i386/x86.c
@@ -61,10 +61,15 @@
#include CONFIG_DEVICES
#include "kvm/kvm_i386.h"
+#ifdef CONFIG_XEN_EMU
+#include "hw/xen/xen.h"
+#include "hw/i386/kvm/xen_evtchn.h"
+#endif
+
/* Physical Address of PVH entry point read from kernel ELF NOTE */
static size_t pvh_start_addr;
-inline void init_topo_info(X86CPUTopoInfo *topo_info,
+static void init_topo_info(X86CPUTopoInfo *topo_info,
const X86MachineState *x86ms)
{
MachineState *ms = MACHINE(x86ms);
@@ -150,17 +155,19 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
}
}
-void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count)
+void x86_rtc_set_cpus_count(ISADevice *s, uint16_t cpus_count)
{
+ MC146818RtcState *rtc = MC146818_RTC(s);
+
if (cpus_count > 0xff) {
/*
* If the number of CPUs can't be represented in 8 bits, the
* BIOS must use "FW_CFG_NB_CPUS". Set RTC field to 0 just
* to make old BIOSes fail more predictably.
*/
- rtc_set_memory(rtc, 0x5f, 0);
+ mc146818rtc_set_cmos_data(rtc, 0x5f, 0);
} else {
- rtc_set_memory(rtc, 0x5f, cpus_count - 1);
+ mc146818rtc_set_cmos_data(rtc, 0x5f, cpus_count - 1);
}
}
@@ -608,6 +615,17 @@ void gsi_handler(void *opaque, int n, int level)
}
/* fall through */
case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1:
+#ifdef CONFIG_XEN_EMU
+ /*
+ * Xen delivers the GSI to the Legacy PIC (not that Legacy PIC
+     * routing actually works properly under Xen), and then to
+     * *either* the PIRQ handling or the I/OAPIC, depending on
+     * whether the former wants it.
+ */
+ if (xen_mode == XEN_EMULATE && xen_evtchn_set_gsi(n, level)) {
+ break;
+ }
+#endif
qemu_set_irq(s->ioapic_irq[n], level);
break;
case IO_APIC_SECONDARY_IRQBASE
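The gsi_handler() change gives the emulated Xen event-channel code first refusal on a GSI: if xen_evtchn_set_gsi() claims the line (because the guest bound it as a PIRQ), the I/OAPIC never sees it; otherwise delivery falls through unchanged. The same control flow, isolated as a hedged sketch with a hypothetical deliver_gsi() wrapper:

/* Sketch of the dispatch priority added above; deliver_gsi() is a
 * hypothetical wrapper, not a function in the patch. */
static void deliver_gsi(GSIState *s, int n, int level)
{
#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE && xen_evtchn_set_gsi(n, level)) {
        return; /* consumed by the Xen PIRQ/event-channel layer */
    }
#endif
    qemu_set_irq(s->ioapic_irq[n], level);
}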
diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build
index be84130300..2e64a34e16 100644
--- a/hw/i386/xen/meson.build
+++ b/hw/i386/xen/meson.build
@@ -2,6 +2,9 @@ i386_ss.add(when: 'CONFIG_XEN', if_true: files(
'xen-hvm.c',
'xen-mapcache.c',
'xen_apic.c',
- 'xen_platform.c',
'xen_pvdevice.c',
))
+
+i386_ss.add(when: 'CONFIG_XEN_BUS', if_true: files(
+ 'xen_platform.c',
+))
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index b9a6f7f538..e5a1dd19f4 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -1502,13 +1502,7 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
device_listener_register(&state->device_listener);
xen_bus_init();
-
- /* Initialize backend core & drivers */
- if (xen_be_init() != 0) {
- error_report("xen backend core setup failed");
- goto err;
- }
- xen_be_register_common();
+ xen_be_init();
QLIST_INIT(&xen_physmap);
xen_read_physmap(state);
diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
index 66e6de31a6..539f7da374 100644
--- a/hw/i386/xen/xen_platform.c
+++ b/hw/i386/xen/xen_platform.c
@@ -25,12 +25,11 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "hw/ide.h"
#include "hw/ide/pci.h"
#include "hw/pci/pci.h"
-#include "hw/xen/xen_common.h"
#include "migration/vmstate.h"
-#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen.h"
+#include "net/net.h"
#include "trace.h"
#include "sysemu/xen.h"
#include "sysemu/block-backend.h"
@@ -38,6 +37,11 @@
#include "qemu/module.h"
#include "qom/object.h"
+#ifdef CONFIG_XEN
+#include "hw/xen/xen_common.h"
+#include "hw/xen/xen-legacy-backend.h"
+#endif
+
//#define DEBUG_PLATFORM
#ifdef DEBUG_PLATFORM
@@ -109,12 +113,25 @@ static void log_writeb(PCIXenPlatformState *s, char val)
#define _UNPLUG_NVME_DISKS 3
#define UNPLUG_NVME_DISKS (1u << _UNPLUG_NVME_DISKS)
+static bool pci_device_is_passthrough(PCIDevice *d)
+{
+ if (!strcmp(d->name, "xen-pci-passthrough")) {
+ return true;
+ }
+
+ if (xen_mode == XEN_EMULATE && !strcmp(d->name, "vfio-pci")) {
+ return true;
+ }
+
+ return false;
+}
+
static void unplug_nic(PCIBus *b, PCIDevice *d, void *o)
{
/* We have to ignore passthrough devices */
if (pci_get_word(d->config + PCI_CLASS_DEVICE) ==
PCI_CLASS_NETWORK_ETHERNET
- && strcmp(d->name, "xen-pci-passthrough") != 0) {
+ && !pci_device_is_passthrough(d)) {
object_unparent(OBJECT(d));
}
}
@@ -187,9 +204,8 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque)
!(flags & UNPLUG_IDE_SCSI_DISKS);
/* We have to ignore passthrough devices */
- if (!strcmp(d->name, "xen-pci-passthrough")) {
+ if (pci_device_is_passthrough(d))
return;
- }
switch (pci_get_word(d->config + PCI_CLASS_DEVICE)) {
case PCI_CLASS_STORAGE_IDE:
@@ -268,18 +284,26 @@ static void platform_fixed_ioport_writeb(void *opaque, uint32_t addr, uint32_t v
PCIXenPlatformState *s = opaque;
switch (addr) {
- case 0: /* Platform flags */ {
- hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ?
- HVMMEM_ram_ro : HVMMEM_ram_rw;
- if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) {
- DPRINTF("unable to change ro/rw state of ROM memory area!\n");
- } else {
+ case 0: /* Platform flags */
+ if (xen_mode == XEN_EMULATE) {
+ /* XX: Use i440gx/q35 PAM setup to do this? */
s->flags = val & PFFLAG_ROM_LOCK;
- DPRINTF("changed ro/rw state of ROM memory area. now is %s state.\n",
- (mem_type == HVMMEM_ram_ro ? "ro":"rw"));
+#ifdef CONFIG_XEN
+ } else {
+ hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ?
+ HVMMEM_ram_ro : HVMMEM_ram_rw;
+
+ if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) {
+ DPRINTF("unable to change ro/rw state of ROM memory area!\n");
+ } else {
+ s->flags = val & PFFLAG_ROM_LOCK;
+ DPRINTF("changed ro/rw state of ROM memory area. now is %s state.\n",
+ (mem_type == HVMMEM_ram_ro ? "ro" : "rw"));
+ }
+#endif
}
break;
- }
+
case 2:
log_writeb(s, val);
break;
@@ -497,8 +521,8 @@ static void xen_platform_realize(PCIDevice *dev, Error **errp)
uint8_t *pci_conf;
/* Device will crash on reset if xen is not initialized */
- if (!xen_enabled()) {
- error_setg(errp, "xen-platform device requires the Xen accelerator");
+ if (xen_mode == XEN_DISABLED) {
+ error_setg(errp, "xen-platform device requires a Xen guest");
return;
}
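unplug_nic() and unplug_disks() are written as per-device callbacks; the platform device applies them across the bus when the guest requests unplug, and pci_device_is_passthrough() keeps real passed-through hardware (including vfio-pci under emulation) out of the sweep. A short usage sketch, assuming QEMU's pci_for_each_device() iterator:

/* Usage sketch only: applies the unplug_nic() callback above to every
 * device on the bus (pci_for_each_device() is assumed here). */
static void unplug_all_nics(PCIBus *bus, void *opaque)
{
    pci_for_each_device(bus, 0, unplug_nic, opaque);
}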
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 7ce001cacd..55902e1df7 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -22,6 +22,7 @@
*/
#include "qemu/osdep.h"
+#include "hw/irq.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
@@ -1085,8 +1086,8 @@ static void execute_ncq_command(NCQTransferState *ncq_tfs)
ncq_cb, ncq_tfs);
break;
case WRITE_FPDMA_QUEUED:
- trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag,
- ncq_tfs->sector_count, ncq_tfs->lba);
+ trace_execute_ncq_command_write(ad->hba, port, ncq_tfs->tag,
+ ncq_tfs->sector_count, ncq_tfs->lba);
dma_acct_start(ide_state->blk, &ncq_tfs->acct,
&ncq_tfs->sglist, BLOCK_ACCT_WRITE);
ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
@@ -1268,7 +1269,7 @@ static void handle_reg_h2d_fis(AHCIState *s, int port,
cmd->status = 0;
/* We're ready to process the command in FIS byte 2. */
- ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);
+ ide_bus_exec_cmd(&s->dev[port].port, cmd_fis[2]);
}
static int handle_cmd(AHCIState *s, int port, uint8_t slot)
@@ -1553,13 +1554,13 @@ void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
AHCIDevice *ad = &s->dev[i];
ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1);
- ide_init2(&ad->port, irqs[i]);
+ ide_bus_init_output_irq(&ad->port, irqs[i]);
ad->hba = s;
ad->port_no = i;
ad->port.dma = &ad->dma;
ad->port.dma->ops = &ahci_dma_ops;
- ide_register_restart_cb(&ad->port);
+ ide_bus_register_restart_cb(&ad->port);
}
g_free(irqs);
}
@@ -1841,7 +1842,7 @@ void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
if (hd[i] == NULL) {
continue;
}
- ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
+ ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]);
}
}
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index 0a9aa6f009..dcc39df9a4 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -27,6 +27,7 @@
#include "hw/ide/internal.h"
#include "hw/scsi/scsi.h"
#include "sysemu/block-backend.h"
+#include "scsi/constants.h"
#include "trace.h"
#define ATAPI_SECTOR_BITS (2 + BDRV_SECTOR_BITS)
@@ -178,7 +179,7 @@ void ide_atapi_cmd_ok(IDEState *s)
s->status = READY_STAT | SEEK_STAT;
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
ide_transfer_stop(s);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc)
@@ -190,7 +191,7 @@ void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc)
s->sense_key = sense_key;
s->asc = asc;
ide_transfer_stop(s);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
void ide_atapi_io_error(IDEState *s, int ret)
@@ -253,7 +254,7 @@ void ide_atapi_cmd_reply_end(IDEState *s)
} else {
/* a new transfer is needed */
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
byte_count_limit = atapi_byte_count_limit(s);
trace_ide_atapi_cmd_reply_end_bcl(s, byte_count_limit);
size = s->packet_transfer_size;
@@ -293,7 +294,7 @@ void ide_atapi_cmd_reply_end(IDEState *s)
/* end of transfer */
trace_ide_atapi_cmd_reply_end_eot(s, s->status);
ide_atapi_cmd_ok(s);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
/* send a reply of 'size' bytes in s->io_buffer to an ATAPI command */
@@ -339,7 +340,7 @@ static void ide_atapi_cmd_check_status(IDEState *s)
s->error = MC_ERR | (UNIT_ATTENTION << 4);
s->status = ERR_STAT;
s->nsector = 0;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
/* ATAPI DMA support */
@@ -383,7 +384,7 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
if (s->packet_transfer_size <= 0) {
s->status = READY_STAT | SEEK_STAT;
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
goto eot;
}
diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c
index 94c576262c..26a90ed45f 100644
--- a/hw/ide/cmd646.c
+++ b/hw/ide/cmd646.c
@@ -294,11 +294,11 @@ static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp)
qdev_init_gpio_in(ds, cmd646_set_irq, 2);
for (i = 0; i < 2; i++) {
ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
- ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i));
+ ide_bus_init_output_irq(&d->bus[i], qdev_get_gpio_in(ds, i));
bmdma_init(&d->bus[i], &d->bmdma[i], d);
d->bmdma[i].bus = &d->bus[i];
- ide_register_restart_cb(&d->bus[i]);
+ ide_bus_register_restart_cb(&d->bus[i]);
}
}
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 5d1039378f..2d034731cf 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
+#include "hw/irq.h"
#include "hw/isa/isa.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
@@ -653,7 +654,7 @@ void ide_set_sector(IDEState *s, int64_t sector_num)
static void ide_rw_error(IDEState *s) {
ide_abort_command(s);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
static void ide_buffered_readv_cb(void *opaque, int ret)
@@ -772,7 +773,7 @@ static void ide_sector_read_cb(void *opaque, int ret)
s->nsector -= n;
/* Allow the guest to read the io_buffer */
ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
static void ide_sector_read(IDEState *s)
@@ -836,7 +837,7 @@ void ide_dma_error(IDEState *s)
dma_buf_commit(s, 0);
ide_abort_command(s);
ide_set_inactive(s, false);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
int ide_handle_rw_error(IDEState *s, int error, int op)
@@ -906,7 +907,7 @@ static void ide_dma_cb(void *opaque, int ret)
/* end of transfer ? */
if (s->nsector == 0) {
s->status = READY_STAT | SEEK_STAT;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
goto eot;
}
@@ -1006,7 +1007,7 @@ static void ide_sector_write(IDEState *s);
static void ide_sector_write_timer_cb(void *opaque)
{
IDEState *s = opaque;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
static void ide_sector_write_cb(void *opaque, int ret)
@@ -1054,7 +1055,7 @@ static void ide_sector_write_cb(void *opaque, int ret)
timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(NANOSECONDS_PER_SECOND / 1000));
} else {
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
}
@@ -1105,7 +1106,7 @@ static void ide_flush_cb(void *opaque, int ret)
}
s->status = READY_STAT | SEEK_STAT;
ide_cmd_done(s);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
static void ide_flush_cache(IDEState *s)
@@ -1194,7 +1195,7 @@ static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
s->cdrom_changed = 1;
s->events.new_media = true;
s->events.eject_request = false;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
static void ide_cd_eject_request_cb(void *opaque, bool force)
@@ -1205,7 +1206,7 @@ static void ide_cd_eject_request_cb(void *opaque, bool force)
if (force) {
s->tray_locked = false;
}
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
static void ide_cmd_lba48_transform(IDEState *s, int lba48)
@@ -1264,7 +1265,7 @@ const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
IDEBus *bus = opaque;
- IDEState *s = idebus_active_if(bus);
+ IDEState *s = ide_bus_active_if(bus);
int reg_num = addr & 7;
trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
@@ -1326,7 +1327,7 @@ void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
case ATA_IOPORT_WR_COMMAND:
ide_clear_hob(bus);
qemu_irq_lower(bus->irq);
- ide_exec_cmd(bus, val);
+ ide_bus_exec_cmd(bus, val);
break;
}
}
@@ -1439,7 +1440,7 @@ static bool cmd_identify(IDEState *s, uint8_t cmd)
}
s->status = READY_STAT | SEEK_STAT;
ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
} else {
if (s->drive_kind == IDE_CD) {
@@ -1629,7 +1630,7 @@ static bool cmd_specify(IDEState *s, uint8_t cmd)
if (s->blk && s->drive_kind != IDE_CD) {
s->heads = (s->select & (ATA_DEV_HS)) + 1;
s->sectors = s->nsector;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
} else {
ide_abort_command(s);
}
@@ -1730,7 +1731,7 @@ static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
ide_atapi_identify(s);
s->status = READY_STAT | SEEK_STAT;
ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
}
@@ -1755,7 +1756,7 @@ static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
* They are part of the regular output (this is why ERR_STAT isn't set)
* Device 0 passed, Device 1 passed or not present. */
s->error = 0x01;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
return false;
@@ -1787,7 +1788,7 @@ static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
{
s->error = 0x09; /* miscellaneous error */
s->status = READY_STAT | SEEK_STAT;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
}
@@ -1826,7 +1827,7 @@ static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
s->io_buffer[0x1a] = 0x01; /* Hot count */
ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
}
@@ -1850,7 +1851,7 @@ static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
s->status = 0x00; /* NOTE: READY is _not_ set */
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
}
@@ -1933,7 +1934,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd)
s->status = READY_STAT | SEEK_STAT;
ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
case SMART_READ_DATA:
@@ -1974,7 +1975,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd)
s->status = READY_STAT | SEEK_STAT;
ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
case SMART_READ_LOG:
@@ -2013,7 +2014,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd)
}
s->status = READY_STAT | SEEK_STAT;
ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return false;
case SMART_EXECUTE_OFFLINE:
@@ -2122,13 +2123,13 @@ static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
&& (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
}
-void ide_exec_cmd(IDEBus *bus, uint32_t val)
+void ide_bus_exec_cmd(IDEBus *bus, uint32_t val)
{
IDEState *s;
bool complete;
- s = idebus_active_if(bus);
- trace_ide_exec_cmd(bus, s, val);
+ s = ide_bus_active_if(bus);
+ trace_ide_bus_exec_cmd(bus, s, val);
/* ignore commands to non existent slave */
if (s != bus->ifs && !s->blk) {
@@ -2145,7 +2146,7 @@ void ide_exec_cmd(IDEBus *bus, uint32_t val)
if (!ide_cmd_permitted(s, val)) {
ide_abort_command(s);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
return;
}
@@ -2163,7 +2164,7 @@ void ide_exec_cmd(IDEBus *bus, uint32_t val)
}
ide_cmd_done(s);
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
}
}
@@ -2194,7 +2195,7 @@ const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
uint32_t ide_ioport_read(void *opaque, uint32_t addr)
{
IDEBus *bus = opaque;
- IDEState *s = idebus_active_if(bus);
+ IDEState *s = ide_bus_active_if(bus);
uint32_t reg_num;
int ret, hob;
@@ -2280,7 +2281,7 @@ uint32_t ide_ioport_read(void *opaque, uint32_t addr)
uint32_t ide_status_read(void *opaque, uint32_t addr)
{
IDEBus *bus = opaque;
- IDEState *s = idebus_active_if(bus);
+ IDEState *s = ide_bus_active_if(bus);
int ret;
if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
@@ -2369,7 +2370,7 @@ static bool ide_is_pio_out(IDEState *s)
void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
{
IDEBus *bus = opaque;
- IDEState *s = idebus_active_if(bus);
+ IDEState *s = ide_bus_active_if(bus);
uint8_t *p;
trace_ide_data_writew(addr, val, bus, s);
@@ -2405,7 +2406,7 @@ void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
uint32_t ide_data_readw(void *opaque, uint32_t addr)
{
IDEBus *bus = opaque;
- IDEState *s = idebus_active_if(bus);
+ IDEState *s = ide_bus_active_if(bus);
uint8_t *p;
int ret;
@@ -2443,7 +2444,7 @@ uint32_t ide_data_readw(void *opaque, uint32_t addr)
void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
{
IDEBus *bus = opaque;
- IDEState *s = idebus_active_if(bus);
+ IDEState *s = ide_bus_active_if(bus);
uint8_t *p;
trace_ide_data_writel(addr, val, bus, s);
@@ -2471,7 +2472,7 @@ void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
uint32_t ide_data_readl(void *opaque, uint32_t addr)
{
IDEBus *bus = opaque;
- IDEState *s = idebus_active_if(bus);
+ IDEState *s = ide_bus_active_if(bus);
uint8_t *p;
int ret;
@@ -2710,7 +2711,7 @@ static void ide_restart_bh(void *opaque)
return;
}
- s = idebus_active_if(bus);
+ s = ide_bus_active_if(bus);
is_read = (bus->error_status & IDE_RETRY_READ) != 0;
/* The error status must be cleared before resubmitting the request: The
@@ -2758,7 +2759,7 @@ static void ide_restart_cb(void *opaque, bool running, RunState state)
}
}
-void ide_register_restart_cb(IDEBus *bus)
+void ide_bus_register_restart_cb(IDEBus *bus)
{
if (bus->dma->ops->restart_dma) {
bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
@@ -2770,7 +2771,7 @@ static IDEDMA ide_dma_nop = {
.aiocb = NULL,
};
-void ide_init2(IDEBus *bus, qemu_irq irq)
+void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out)
{
int i;
@@ -2778,10 +2779,17 @@ void ide_init2(IDEBus *bus, qemu_irq irq)
ide_init1(bus, i);
ide_reset(&bus->ifs[i]);
}
- bus->irq = irq;
+ bus->irq = irq_out;
bus->dma = &ide_dma_nop;
}
+void ide_bus_set_irq(IDEBus *bus)
+{
+ if (!(bus->cmd & IDE_CTRL_DISABLE_IRQ)) {
+ qemu_irq_raise(bus->irq);
+ }
+}
+
void ide_exit(IDEState *s)
{
timer_free(s->sector_write_timer);
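With the rename, device code no longer raises bus->irq directly: ide_bus_set_irq() centralises the check of the nIEN (IDE_CTRL_DISABLE_IRQ) bit in the device control register, so a guest that has masked interrupts never sees a spurious raise. A brief usage sketch (example_command_done() is hypothetical):

/* Usage sketch: completion paths signal the guest through the bus
 * helper so the IDE_CTRL_DISABLE_IRQ gate is honoured in one place. */
static void example_command_done(IDEState *s)
{
    ide_cmd_done(s);
    ide_bus_set_irq(s->bus);   /* no-op while the guest masks IRQs */
}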
diff --git a/hw/ide/ich.c b/hw/ide/ich.c
index 1007a51fcb..d61faab532 100644
--- a/hw/ide/ich.c
+++ b/hw/ide/ich.c
@@ -61,6 +61,7 @@
*/
#include "qemu/osdep.h"
+#include "hw/irq.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
diff --git a/hw/ide/ioport.c b/hw/ide/ioport.c
index e6caa537fa..e2ecc6230c 100644
--- a/hw/ide/ioport.c
+++ b/hw/ide/ioport.c
@@ -25,16 +25,6 @@
#include "qemu/osdep.h"
#include "hw/isa/isa.h"
-#include "qemu/error-report.h"
-#include "qemu/timer.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/dma.h"
-#include "hw/block/block.h"
-#include "sysemu/block-backend.h"
-#include "qapi/error.h"
-#include "qemu/cutils.h"
-#include "sysemu/replay.h"
-
#include "hw/ide/internal.h"
#include "trace.h"
diff --git a/hw/ide/isa.c b/hw/ide/isa.c
index 8bedbd13f1..95053e026f 100644
--- a/hw/ide/isa.c
+++ b/hw/ide/isa.c
@@ -31,23 +31,20 @@
#include "qemu/module.h"
#include "sysemu/dma.h"
+#include "hw/ide/isa.h"
#include "hw/ide/internal.h"
#include "qom/object.h"
/***********************************************************/
/* ISA IDE definitions */
-#define TYPE_ISA_IDE "isa-ide"
-OBJECT_DECLARE_SIMPLE_TYPE(ISAIDEState, ISA_IDE)
-
struct ISAIDEState {
ISADevice parent_obj;
IDEBus bus;
uint32_t iobase;
uint32_t iobase2;
- uint32_t isairq;
- qemu_irq irq;
+ uint32_t irqnum;
};
static void isa_ide_reset(DeviceState *d)
@@ -75,13 +72,12 @@ static void isa_ide_realizefn(DeviceState *dev, Error **errp)
ide_bus_init(&s->bus, sizeof(s->bus), dev, 0, 2);
ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2);
- s->irq = isa_get_irq(isadev, s->isairq);
- ide_init2(&s->bus, s->irq);
+ ide_bus_init_output_irq(&s->bus, isa_get_irq(isadev, s->irqnum));
vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_isa, s);
- ide_register_restart_cb(&s->bus);
+ ide_bus_register_restart_cb(&s->bus);
}
-ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq,
+ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int irqnum,
DriveInfo *hd0, DriveInfo *hd1)
{
DeviceState *dev;
@@ -92,15 +88,15 @@ ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq,
dev = DEVICE(isadev);
qdev_prop_set_uint32(dev, "iobase", iobase);
qdev_prop_set_uint32(dev, "iobase2", iobase2);
- qdev_prop_set_uint32(dev, "irq", isairq);
+ qdev_prop_set_uint32(dev, "irq", irqnum);
isa_realize_and_unref(isadev, bus, &error_fatal);
s = ISA_IDE(dev);
if (hd0) {
- ide_create_drive(&s->bus, 0, hd0);
+ ide_bus_create_drive(&s->bus, 0, hd0);
}
if (hd1) {
- ide_create_drive(&s->bus, 1, hd1);
+ ide_bus_create_drive(&s->bus, 1, hd1);
}
return isadev;
}
@@ -108,7 +104,7 @@ ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq,
static Property isa_ide_properties[] = {
DEFINE_PROP_UINT32("iobase", ISAIDEState, iobase, 0x1f0),
DEFINE_PROP_UINT32("iobase2", ISAIDEState, iobase2, 0x3f6),
- DEFINE_PROP_UINT32("irq", ISAIDEState, isairq, 14),
+ DEFINE_PROP_UINT32("irq", ISAIDEState, irqnum, 14),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index e604466acb..dca1cc9efc 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
+#include "hw/irq.h"
#include "hw/ppc/mac_dbdma.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
@@ -59,7 +60,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
- IDEState *s = idebus_active_if(&m->bus);
+ IDEState *s = ide_bus_active_if(&m->bus);
int64_t offset;
MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n");
@@ -135,7 +136,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
{
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
- IDEState *s = idebus_active_if(&m->bus);
+ IDEState *s = ide_bus_active_if(&m->bus);
int64_t offset;
MACIO_DPRINTF("pmac_ide_transfer_cb\n");
@@ -159,7 +160,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
MACIO_DPRINTF("End of IDE transfer\n");
qemu_sglist_destroy(&s->sg);
s->status = READY_STAT | SEEK_STAT;
- ide_set_irq(s->bus);
+ ide_bus_set_irq(s->bus);
m->dma_active = false;
goto done;
}
@@ -219,7 +220,7 @@ done:
static void pmac_ide_transfer(DBDMA_io *io)
{
MACIOIDEState *m = io->opaque;
- IDEState *s = idebus_active_if(&m->bus);
+ IDEState *s = ide_bus_active_if(&m->bus);
MACIO_DPRINTF("\n");
@@ -250,7 +251,7 @@ static void pmac_ide_transfer(DBDMA_io *io)
static void pmac_ide_flush(DBDMA_io *io)
{
MACIOIDEState *m = io->opaque;
- IDEState *s = idebus_active_if(&m->bus);
+ IDEState *s = ide_bus_active_if(&m->bus);
if (s->bus->dma->aiocb) {
blk_drain(s->blk);
@@ -419,7 +420,7 @@ static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
MACIOIDEState *s = MACIO_IDE(dev);
- ide_init2(&s->bus, s->ide_irq);
+ ide_bus_init_output_irq(&s->bus, s->ide_irq);
/* Register DMA callbacks */
s->dma.ops = &dbdma_ops;
@@ -500,7 +501,7 @@ void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
for (i = 0; i < 2; i++) {
if (hd_table[i]) {
- ide_create_drive(&s->bus, i, hd_table[i]);
+ ide_bus_create_drive(&s->bus, i, hd_table[i]);
}
}
}
diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c
index 56c5be3655..f1017f7333 100644
--- a/hw/ide/microdrive.c
+++ b/hw/ide/microdrive.c
@@ -29,6 +29,7 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/dma.h"
+#include "hw/irq.h"
#include "hw/ide/internal.h"
#include "qom/object.h"
@@ -249,14 +250,14 @@ static uint16_t md_common_read(PCMCIACardState *card, uint32_t at)
case 0xd: /* Error */
return ide_ioport_read(&s->bus, 0x1);
case 0xe: /* Alternate Status */
- ifs = idebus_active_if(&s->bus);
+ ifs = ide_bus_active_if(&s->bus);
if (ifs->blk) {
return ifs->status;
} else {
return 0;
}
case 0xf: /* Device Address */
- ifs = idebus_active_if(&s->bus);
+ ifs = ide_bus_active_if(&s->bus);
return 0xc2 | ((~ifs->select << 2) & 0x3c);
default:
return ide_ioport_read(&s->bus, at);
@@ -565,7 +566,7 @@ PCMCIACardState *dscm1xxxx_init(DriveInfo *dinfo)
qdev_realize(DEVICE(md), NULL, &error_fatal);
if (dinfo != NULL) {
- ide_create_drive(&md->bus, 0, dinfo);
+ ide_bus_create_drive(&md->bus, 0, dinfo);
}
md->bus.ifs[0].drive_kind = IDE_CFATA;
md->bus.ifs[0].mdata_size = METADATA_SIZE;
@@ -598,7 +599,7 @@ static void microdrive_realize(DeviceState *dev, Error **errp)
{
MicroDriveState *md = MICRODRIVE(dev);
- ide_init2(&md->bus, qemu_allocate_irq(md_set_irq, md, 0));
+ ide_bus_init_output_irq(&md->bus, qemu_allocate_irq(md_set_irq, md, 0));
}
static void microdrive_init(Object *obj)
diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c
index fb2ebd4847..3aeacab3bb 100644
--- a/hw/ide/mmio.c
+++ b/hw/ide/mmio.c
@@ -29,9 +29,9 @@
#include "qemu/module.h"
#include "sysemu/dma.h"
+#include "hw/ide/mmio.h"
#include "hw/ide/internal.h"
#include "hw/qdev-properties.h"
-#include "qom/object.h"
/***********************************************************/
/* MMIO based ide port
@@ -39,11 +39,6 @@
* dedicated ide controller, which is often seen on embedded boards.
*/
-#define TYPE_MMIO_IDE "mmio-ide"
-typedef struct MMIOIDEState MMIOState;
-DECLARE_INSTANCE_CHECKER(MMIOState, MMIO_IDE,
- TYPE_MMIO_IDE)
-
struct MMIOIDEState {
/*< private >*/
SysBusDevice parent_obj;
@@ -58,7 +53,7 @@ struct MMIOIDEState {
static void mmio_ide_reset(DeviceState *dev)
{
- MMIOState *s = MMIO_IDE(dev);
+ MMIOIDEState *s = MMIO_IDE(dev);
ide_bus_reset(&s->bus);
}
@@ -66,7 +61,7 @@ static void mmio_ide_reset(DeviceState *dev)
static uint64_t mmio_ide_read(void *opaque, hwaddr addr,
unsigned size)
{
- MMIOState *s = opaque;
+ MMIOIDEState *s = opaque;
addr >>= s->shift;
if (addr & 7)
return ide_ioport_read(&s->bus, addr);
@@ -77,7 +72,7 @@ static uint64_t mmio_ide_read(void *opaque, hwaddr addr,
static void mmio_ide_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- MMIOState *s = opaque;
+ MMIOIDEState *s = opaque;
addr >>= s->shift;
if (addr & 7)
ide_ioport_write(&s->bus, addr, val);
@@ -94,14 +89,14 @@ static const MemoryRegionOps mmio_ide_ops = {
static uint64_t mmio_ide_status_read(void *opaque, hwaddr addr,
unsigned size)
{
- MMIOState *s= opaque;
+ MMIOIDEState *s = opaque;
return ide_status_read(&s->bus, 0);
}
static void mmio_ide_ctrl_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- MMIOState *s = opaque;
+ MMIOIDEState *s = opaque;
ide_ctrl_write(&s->bus, 0, val);
}
@@ -116,8 +111,8 @@ static const VMStateDescription vmstate_ide_mmio = {
.version_id = 3,
.minimum_version_id = 0,
.fields = (VMStateField[]) {
- VMSTATE_IDE_BUS(bus, MMIOState),
- VMSTATE_IDE_DRIVES(bus.ifs, MMIOState),
+ VMSTATE_IDE_BUS(bus, MMIOIDEState),
+ VMSTATE_IDE_DRIVES(bus.ifs, MMIOIDEState),
VMSTATE_END_OF_LIST()
}
};
@@ -125,9 +120,9 @@ static const VMStateDescription vmstate_ide_mmio = {
static void mmio_ide_realizefn(DeviceState *dev, Error **errp)
{
SysBusDevice *d = SYS_BUS_DEVICE(dev);
- MMIOState *s = MMIO_IDE(dev);
+ MMIOIDEState *s = MMIO_IDE(dev);
- ide_init2(&s->bus, s->irq);
+ ide_bus_init_output_irq(&s->bus, s->irq);
memory_region_init_io(&s->iomem1, OBJECT(s), &mmio_ide_ops, s,
"ide-mmio.1", 16 << s->shift);
@@ -140,14 +135,14 @@ static void mmio_ide_realizefn(DeviceState *dev, Error **errp)
static void mmio_ide_initfn(Object *obj)
{
SysBusDevice *d = SYS_BUS_DEVICE(obj);
- MMIOState *s = MMIO_IDE(obj);
+ MMIOIDEState *s = MMIO_IDE(obj);
ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
sysbus_init_irq(d, &s->irq);
}
static Property mmio_ide_properties[] = {
- DEFINE_PROP_UINT32("shift", MMIOState, shift, 0),
+ DEFINE_PROP_UINT32("shift", MMIOIDEState, shift, 0),
DEFINE_PROP_END_OF_LIST()
};
@@ -164,7 +159,7 @@ static void mmio_ide_class_init(ObjectClass *oc, void *data)
static const TypeInfo mmio_ide_type_info = {
.name = TYPE_MMIO_IDE,
.parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(MMIOState),
+ .instance_size = sizeof(MMIOIDEState),
.instance_init = mmio_ide_initfn,
.class_init = mmio_ide_class_init,
};
@@ -176,13 +171,13 @@ static void mmio_ide_register_types(void)
void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1)
{
- MMIOState *s = MMIO_IDE(dev);
+ MMIOIDEState *s = MMIO_IDE(dev);
if (hd0 != NULL) {
- ide_create_drive(&s->bus, 0, hd0);
+ ide_bus_create_drive(&s->bus, 0, hd0);
}
if (hd1 != NULL) {
- ide_create_drive(&s->bus, 1, hd1);
+ ide_bus_create_drive(&s->bus, 1, hd1);
}
}
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 84ba733548..fc9224bbc9 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
+#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
@@ -103,6 +104,12 @@ const MemoryRegionOps pci_ide_data_le_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
+static IDEState *bmdma_active_if(BMDMAState *bmdma)
+{
+ assert(bmdma->bus->retry_unit != (uint8_t)-1);
+ return bmdma->bus->ifs + bmdma->bus->retry_unit;
+}
+
static void bmdma_start_dma(const IDEDMA *dma, IDEState *s,
BlockCompletionFunc *dma_cb)
{
@@ -295,7 +302,7 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
/* Ignore writes to SSBM if it keeps the old value */
if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
if (!(val & BM_CMD_START)) {
- ide_cancel_dma_sync(idebus_active_if(bm->bus));
+ ide_cancel_dma_sync(ide_bus_active_if(bm->bus));
bm->status &= ~BM_STATUS_DMAING;
} else {
bm->cur_addr = bm->addr;
@@ -488,7 +495,7 @@ void pci_ide_create_devs(PCIDevice *dev)
ide_drive_get(hd_table, ARRAY_SIZE(hd_table));
for (i = 0; i < 4; i++) {
if (hd_table[i]) {
- ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
+ ide_bus_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
}
}
}
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
index 267dbf37db..41d60921e3 100644
--- a/hw/ide/piix.c
+++ b/hw/ide/piix.c
@@ -28,14 +28,9 @@
*/
#include "qemu/osdep.h"
-#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
-#include "qemu/module.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/dma.h"
-
+#include "hw/pci/pci.h"
#include "hw/ide/piix.h"
#include "hw/ide/pci.h"
#include "trace.h"
@@ -126,7 +121,7 @@ static void piix_ide_reset(DeviceState *dev)
pci_set_byte(pci_conf + 0x20, 0x01); /* BMIBA: 20-23h */
}
-static int pci_piix_init_ports(PCIIDEState *d)
+static bool pci_piix_init_bus(PCIIDEState *d, unsigned i, Error **errp)
{
static const struct {
int iobase;
@@ -136,30 +131,29 @@ static int pci_piix_init_ports(PCIIDEState *d)
{0x1f0, 0x3f6, 14},
{0x170, 0x376, 15},
};
- int i, ret;
-
- for (i = 0; i < 2; i++) {
- ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
- ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase,
- port_info[i].iobase2);
- if (ret) {
- return ret;
- }
- ide_init2(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq));
-
- bmdma_init(&d->bus[i], &d->bmdma[i], d);
- d->bmdma[i].bus = &d->bus[i];
- ide_register_restart_cb(&d->bus[i]);
+ int ret;
+
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
+ ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase,
+ port_info[i].iobase2);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Failed to realize %s port %u",
+ object_get_typename(OBJECT(d)), i);
+ return false;
}
+ ide_bus_init_output_irq(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq));
- return 0;
+ bmdma_init(&d->bus[i], &d->bmdma[i], d);
+ d->bmdma[i].bus = &d->bus[i];
+ ide_bus_register_restart_cb(&d->bus[i]);
+
+ return true;
}
static void pci_piix_ide_realize(PCIDevice *dev, Error **errp)
{
PCIIDEState *d = PCI_IDE(dev);
uint8_t *pci_conf = dev->config;
- int rc;
pci_conf[PCI_CLASS_PROG] = 0x80; // legacy ATA mode
@@ -168,10 +162,10 @@ static void pci_piix_ide_realize(PCIDevice *dev, Error **errp)
vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_pci, d);
- rc = pci_piix_init_ports(d);
- if (rc) {
- error_setg_errno(errp, -rc, "Failed to realize %s",
- object_get_typename(OBJECT(dev)));
+ for (unsigned i = 0; i < 2; i++) {
+ if (!pci_piix_init_bus(d, i, errp)) {
+ return;
+ }
}
}
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
index 6f6c7462f3..1b3b4da01d 100644
--- a/hw/ide/qdev.c
+++ b/hw/ide/qdev.c
@@ -124,7 +124,7 @@ static void ide_qdev_realize(DeviceState *qdev, Error **errp)
dc->realize(dev, errp);
}
-IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive)
+IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive)
{
DeviceState *dev;
diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c
index 46204f10d7..f9becdff8e 100644
--- a/hw/ide/sii3112.c
+++ b/hw/ide/sii3112.c
@@ -284,11 +284,11 @@ static void sii3112_pci_realize(PCIDevice *dev, Error **errp)
qdev_init_gpio_in(ds, sii3112_set_irq, 2);
for (i = 0; i < 2; i++) {
ide_bus_init(&s->bus[i], sizeof(s->bus[i]), ds, i, 1);
- ide_init2(&s->bus[i], qdev_get_gpio_in(ds, i));
+ ide_bus_init_output_irq(&s->bus[i], qdev_get_gpio_in(ds, i));
bmdma_init(&s->bus[i], &s->bmdma[i], s);
s->bmdma[i].bus = &s->bus[i];
- ide_register_restart_cb(&s->bus[i]);
+ ide_bus_register_restart_cb(&s->bus[i]);
}
}
diff --git a/hw/ide/trace-events b/hw/ide/trace-events
index 15d7921f15..57042cafdd 100644
--- a/hw/ide/trace-events
+++ b/hw/ide/trace-events
@@ -12,7 +12,7 @@ ide_data_writew(uint32_t addr, uint32_t val, void *bus, void *s)
ide_data_readl(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p"
ide_data_writel(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p"
# misc
-ide_exec_cmd(void *bus, void *state, uint32_t cmd) "IDE exec cmd: bus %p; state %p; cmd 0x%02x"
+ide_bus_exec_cmd(void *bus, void *state, uint32_t cmd) "IDE exec cmd: bus %p; state %p; cmd 0x%02x"
ide_cancel_dma_sync_buffered(void *fn, void *req) "invoking cb %p of buffered request %p with -ECANCELED"
ide_cancel_dma_sync_remaining(void) "draining all remaining requests"
ide_sector_read(int64_t sector_num, int nsectors) "sector=%"PRId64" nsectors=%d"
@@ -91,6 +91,7 @@ ahci_populate_sglist_short_map(void *s, int port) "ahci(%p)[%d]: mapped less tha
ahci_populate_sglist_bad_offset(void *s, int port, int off_idx, int64_t off_pos) "ahci(%p)[%d]: Incorrect offset! off_idx: %d, off_pos: %"PRId64
ncq_finish(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: NCQ transfer finished"
execute_ncq_command_read(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ reading %d sectors from LBA %"PRId64
+execute_ncq_command_write(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ writing %d sectors to LBA %"PRId64
execute_ncq_command_unsup(void *s, int port, uint8_t tag, uint8_t cmd) "ahci(%p)[%d][tag:%d]: error: unsupported NCQ command (0x%02x) received"
process_ncq_command_mismatch(void *s, int port, uint8_t tag, uint8_t slot) "ahci(%p)[%d][tag:%d]: Warning: NCQ slot (%d) did not match the given tag"
process_ncq_command_aux(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Attempt to use NCQ auxiliary fields"
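Each trace-events line expands into a trace_<name>() helper with the declared argument list, which is why the ahci.c hunk above can simply switch the WRITE_FPDMA_QUEUED path from the read event to the new write event. Roughly, the generated prototype for the added line is:

/* Generated by the tracing infrastructure from the line added above
 * (shown for orientation; the exact emitted code depends on the
 * configured trace backend). */
void trace_execute_ncq_command_write(void *s, int port, uint8_t tag,
                                     int count, int64_t lba);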
diff --git a/hw/ide/via.c b/hw/ide/via.c
index e1a429405d..177baea9a7 100644
--- a/hw/ide/via.c
+++ b/hw/ide/via.c
@@ -90,7 +90,7 @@ static void bmdma_setup_bar(PCIIDEState *d)
int i;
memory_region_init(&d->bmdma_bar, OBJECT(d), "via-bmdma-container", 16);
- for(i = 0;i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(d->bmdma); i++) {
BMDMAState *bm = &d->bmdma[i];
memory_region_init_io(&bm->extra_io, OBJECT(d), &via_bmdma_ops, bm,
@@ -122,7 +122,7 @@ static void via_ide_reset(DeviceState *dev)
uint8_t *pci_conf = pd->config;
int i;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(d->bus); i++) {
ide_bus_reset(&d->bus[i]);
}
@@ -188,14 +188,14 @@ static void via_ide_realize(PCIDevice *dev, Error **errp)
bmdma_setup_bar(d);
pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);
- qdev_init_gpio_in(ds, via_ide_set_irq, 2);
- for (i = 0; i < 2; i++) {
- ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
- ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i));
+ qdev_init_gpio_in(ds, via_ide_set_irq, ARRAY_SIZE(d->bus));
+ for (i = 0; i < ARRAY_SIZE(d->bus); i++) {
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, MAX_IDE_DEVS);
+ ide_bus_init_output_irq(&d->bus[i], qdev_get_gpio_in(ds, i));
bmdma_init(&d->bus[i], &d->bmdma[i], d);
d->bmdma[i].bus = &d->bus[i];
- ide_register_restart_cb(&d->bus[i]);
+ ide_bus_register_restart_cb(&d->bus[i]);
}
}
@@ -204,7 +204,7 @@ static void via_ide_exitfn(PCIDevice *dev)
PCIIDEState *d = PCI_IDE(dev);
unsigned i;
- for (i = 0; i < 2; ++i) {
+ for (i = 0; i < ARRAY_SIZE(d->bmdma); ++i) {
memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io);
memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport);
}
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 2d3e55f4e2..0ff060f721 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -20,7 +20,7 @@
#include "qemu/thread.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
-#include "hw/i386/ioapic.h"
+#include "hw/intc/ioapic.h"
#include "hw/intc/i8259.h"
#include "hw/intc/kvm_irqcount.h"
#include "hw/pci/msi.h"
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
index 0261f087b2..17910f3bcb 100644
--- a/hw/intc/i8259.c
+++ b/hw/intc/i8259.c
@@ -406,7 +406,7 @@ static void pic_realize(DeviceState *dev, Error **errp)
pc->parent_realize(dev, errp);
}
-qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq)
+qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in)
{
qemu_irq *irq_set;
DeviceState *dev;
@@ -418,7 +418,7 @@ qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq)
isadev = i8259_init_chip(TYPE_I8259, bus, true);
dev = DEVICE(isadev);
- qdev_connect_gpio_out(dev, 0, parent_irq);
+ qdev_connect_gpio_out(dev, 0, parent_irq_in);
for (i = 0 ; i < 8; i++) {
irq_set[i] = qdev_get_gpio_in(dev, i);
}
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
index 264262959d..6364ecab1b 100644
--- a/hw/intc/ioapic.c
+++ b/hw/intc/ioapic.c
@@ -24,10 +24,10 @@
#include "qapi/error.h"
#include "monitor/monitor.h"
#include "hw/i386/apic.h"
-#include "hw/i386/ioapic.h"
-#include "hw/i386/ioapic_internal.h"
#include "hw/i386/x86.h"
#include "hw/intc/i8259.h"
+#include "hw/intc/ioapic.h"
+#include "hw/intc/ioapic_internal.h"
#include "hw/pci/msi.h"
#include "hw/qdev-properties.h"
#include "sysemu/kvm.h"
diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c
index aa5f760871..b05f436dac 100644
--- a/hw/intc/ioapic_common.c
+++ b/hw/intc/ioapic_common.c
@@ -24,9 +24,9 @@
#include "qemu/module.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
-#include "hw/i386/ioapic.h"
-#include "hw/i386/ioapic_internal.h"
#include "hw/intc/intc.h"
+#include "hw/intc/ioapic.h"
+#include "hw/intc/ioapic_internal.h"
#include "hw/sysbus.h"
/* ioapic_no count start from 0 to MAX_IOAPICS,
diff --git a/hw/intc/ioapic_internal.h b/hw/intc/ioapic_internal.h
new file mode 100644
index 0000000000..37b8565539
--- /dev/null
+++ b/hw/intc/ioapic_internal.h
@@ -0,0 +1,118 @@
+/*
+ * IOAPIC emulation logic - internal interfaces
+ *
+ * Copyright (c) 2004-2005 Fabrice Bellard
+ * Copyright (c) 2009 Xiantao Zhang, Intel
+ * Copyright (c) 2011 Jan Kiszka, Siemens AG
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_INTC_IOAPIC_INTERNAL_H
+#define HW_INTC_IOAPIC_INTERNAL_H
+
+#include "exec/memory.h"
+#include "hw/intc/ioapic.h"
+#include "hw/sysbus.h"
+#include "qemu/notify.h"
+#include "qom/object.h"
+
+#define MAX_IOAPICS 2
+
+#define IOAPIC_LVT_DEST_SHIFT 56
+#define IOAPIC_LVT_DEST_IDX_SHIFT 48
+#define IOAPIC_LVT_MASKED_SHIFT 16
+#define IOAPIC_LVT_TRIGGER_MODE_SHIFT 15
+#define IOAPIC_LVT_REMOTE_IRR_SHIFT 14
+#define IOAPIC_LVT_POLARITY_SHIFT 13
+#define IOAPIC_LVT_DELIV_STATUS_SHIFT 12
+#define IOAPIC_LVT_DEST_MODE_SHIFT 11
+#define IOAPIC_LVT_DELIV_MODE_SHIFT 8
+
+#define IOAPIC_LVT_MASKED (1 << IOAPIC_LVT_MASKED_SHIFT)
+#define IOAPIC_LVT_TRIGGER_MODE (1 << IOAPIC_LVT_TRIGGER_MODE_SHIFT)
+#define IOAPIC_LVT_REMOTE_IRR (1 << IOAPIC_LVT_REMOTE_IRR_SHIFT)
+#define IOAPIC_LVT_POLARITY (1 << IOAPIC_LVT_POLARITY_SHIFT)
+#define IOAPIC_LVT_DELIV_STATUS (1 << IOAPIC_LVT_DELIV_STATUS_SHIFT)
+#define IOAPIC_LVT_DEST_MODE (1 << IOAPIC_LVT_DEST_MODE_SHIFT)
+#define IOAPIC_LVT_DELIV_MODE (7 << IOAPIC_LVT_DELIV_MODE_SHIFT)
+
+/* Bits that are read-only for IOAPIC entry */
+#define IOAPIC_RO_BITS (IOAPIC_LVT_REMOTE_IRR | \
+ IOAPIC_LVT_DELIV_STATUS)
+#define IOAPIC_RW_BITS (~(uint64_t)IOAPIC_RO_BITS)
+
+#define IOAPIC_TRIGGER_EDGE 0
+#define IOAPIC_TRIGGER_LEVEL 1
+
+/*io{apic,sapic} delivery mode*/
+#define IOAPIC_DM_FIXED 0x0
+#define IOAPIC_DM_LOWEST_PRIORITY 0x1
+#define IOAPIC_DM_PMI 0x2
+#define IOAPIC_DM_NMI 0x4
+#define IOAPIC_DM_INIT 0x5
+#define IOAPIC_DM_SIPI 0x6
+#define IOAPIC_DM_EXTINT 0x7
+#define IOAPIC_DM_MASK 0x7
+
+#define IOAPIC_VECTOR_MASK 0xff
+
+#define IOAPIC_IOREGSEL 0x00
+#define IOAPIC_IOWIN 0x10
+#define IOAPIC_EOI 0x40
+
+#define IOAPIC_REG_ID 0x00
+#define IOAPIC_REG_VER 0x01
+#define IOAPIC_REG_ARB 0x02
+#define IOAPIC_REG_REDTBL_BASE 0x10
+#define IOAPIC_ID 0x00
+
+#define IOAPIC_ID_SHIFT 24
+#define IOAPIC_ID_MASK 0xf
+
+#define IOAPIC_VER_ENTRIES_SHIFT 16
+
+
+#define TYPE_IOAPIC_COMMON "ioapic-common"
+OBJECT_DECLARE_TYPE(IOAPICCommonState, IOAPICCommonClass, IOAPIC_COMMON)
+
+struct IOAPICCommonClass {
+ SysBusDeviceClass parent_class;
+
+ DeviceRealize realize;
+ DeviceUnrealize unrealize;
+ void (*pre_save)(IOAPICCommonState *s);
+ void (*post_load)(IOAPICCommonState *s);
+};
+
+struct IOAPICCommonState {
+ SysBusDevice busdev;
+ MemoryRegion io_memory;
+ uint8_t id;
+ uint8_t ioregsel;
+ uint32_t irr;
+ uint64_t ioredtbl[IOAPIC_NUM_PINS];
+ Notifier machine_done;
+ uint8_t version;
+ uint64_t irq_count[IOAPIC_NUM_PINS];
+ int irq_level[IOAPIC_NUM_PINS];
+ int irq_eoi[IOAPIC_NUM_PINS];
+ QEMUTimer *delayed_ioapic_service_timer;
+};
+
+void ioapic_reset_common(DeviceState *dev);
+
+void ioapic_stat_update_irq(IOAPICCommonState *s, int irq, int level);
+
+#endif /* HW_INTC_IOAPIC_INTERNAL_H */
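
The new header above is mostly bit-layout constants for the 64-bit
redirection-table entries. As a quick illustration of how those shift/mask
pairs are meant to be used, here is a standalone sketch (not code from the
patch; the sample entry value is made up) that redefines only the constants
it needs:

    #include <stdint.h>
    #include <stdio.h>

    #define IOAPIC_LVT_DEST_SHIFT        56
    #define IOAPIC_LVT_MASKED_SHIFT      16
    #define IOAPIC_LVT_DELIV_MODE_SHIFT  8
    #define IOAPIC_LVT_MASKED            (1 << IOAPIC_LVT_MASKED_SHIFT)
    #define IOAPIC_LVT_DELIV_MODE        (7 << IOAPIC_LVT_DELIV_MODE_SHIFT)
    #define IOAPIC_VECTOR_MASK           0xff

    int main(void)
    {
        /* dest APIC 0xfe, masked, delivery mode 0 (fixed), vector 0x31 */
        uint64_t entry = (0xfeULL << IOAPIC_LVT_DEST_SHIFT)
                         | IOAPIC_LVT_MASKED | 0x31;

        printf("vector      0x%02x\n", (unsigned)(entry & IOAPIC_VECTOR_MASK));
        printf("deliv mode  %u\n",
               (unsigned)((entry & IOAPIC_LVT_DELIV_MODE)
                          >> IOAPIC_LVT_DELIV_MODE_SHIFT));
        printf("masked      %u\n", (unsigned)!!(entry & IOAPIC_LVT_MASKED));
        printf("dest        0x%02x\n",
               (unsigned)(entry >> IOAPIC_LVT_DEST_SHIFT));
        return 0;
    }
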
diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c
index e3322e03bf..233059c6dc 100644
--- a/hw/isa/i82378.c
+++ b/hw/isa/i82378.c
@@ -32,8 +32,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(I82378State, I82378)
struct I82378State {
PCIDevice parent_obj;
- qemu_irq out[2];
- qemu_irq *i8259;
+ qemu_irq cpu_intr;
+ qemu_irq *isa_irqs_in;
MemoryRegion io;
};
@@ -47,18 +47,12 @@ static const VMStateDescription vmstate_i82378 = {
},
};
-static void i82378_request_out0_irq(void *opaque, int irq, int level)
-{
- I82378State *s = opaque;
- qemu_set_irq(s->out[0], level);
-}
-
static void i82378_request_pic_irq(void *opaque, int irq, int level)
{
DeviceState *dev = opaque;
I82378State *s = I82378(dev);
- qemu_set_irq(s->i8259[irq], level);
+ qemu_set_irq(s->isa_irqs_in[irq], level);
}
static void i82378_realize(PCIDevice *pci, Error **errp)
@@ -94,9 +88,8 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
*/
/* 2 82C59 (irq) */
- s->i8259 = i8259_init(isabus,
- qemu_allocate_irq(i82378_request_out0_irq, s, 0));
- isa_bus_irqs(isabus, s->i8259);
+ s->isa_irqs_in = i8259_init(isabus, s->cpu_intr);
+ isa_bus_register_input_irqs(isabus, s->isa_irqs_in);
/* 1 82C54 (pit) */
pit = i8254_pit_init(isabus, 0x40, 0, NULL);
@@ -113,7 +106,7 @@ static void i82378_init(Object *obj)
DeviceState *dev = DEVICE(obj);
I82378State *s = I82378(obj);
- qdev_init_gpio_out(dev, s->out, 1);
+ qdev_init_gpio_out(dev, &s->cpu_intr, 1);
qdev_init_gpio_in(dev, i82378_request_pic_irq, 16);
}
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index f155b80010..a289eccfb1 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -67,13 +67,20 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion* address_space,
return isabus;
}
-void isa_bus_irqs(ISABus *bus, qemu_irq *irqs)
+void isa_bus_register_input_irqs(ISABus *bus, qemu_irq *irqs_in)
{
- bus->irqs = irqs;
+ bus->irqs_in = irqs_in;
+}
+
+qemu_irq isa_bus_get_irq(ISABus *bus, unsigned irqnum)
+{
+ assert(irqnum < ISA_NUM_IRQS);
+ assert(bus->irqs_in);
+ return bus->irqs_in[irqnum];
}
/*
- * isa_get_irq() returns the corresponding qemu_irq entry for the i8259.
+ * isa_get_irq() returns the corresponding input qemu_irq entry for the i8259.
*
* This function is only for special cases such as the 'ferr', and
* temporary use for normal devices until they are converted to qdev.
@@ -81,14 +88,13 @@ void isa_bus_irqs(ISABus *bus, qemu_irq *irqs)
qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq)
{
assert(!dev || ISA_BUS(qdev_get_parent_bus(DEVICE(dev))) == isabus);
- assert(isairq < ISA_NUM_IRQS);
- return isabus->irqs[isairq];
+ return isa_bus_get_irq(isabus, isairq);
}
void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq)
{
- qemu_irq irq = isa_get_irq(isadev, isairq);
- qdev_connect_gpio_out(DEVICE(isadev), gpioirq, irq);
+ qemu_irq input_irq = isa_get_irq(isadev, isairq);
+ qdev_connect_gpio_out(DEVICE(isadev), gpioirq, input_irq);
}
void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16)
@@ -99,7 +105,7 @@ void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16)
bus->dma[1] = dma16;
}
-IsaDma *isa_get_dma(ISABus *bus, int nchan)
+IsaDma *isa_bus_get_dma(ISABus *bus, int nchan)
{
assert(bus);
return bus->dma[nchan > 3 ? 1 : 0];
@@ -114,7 +120,7 @@ static inline void isa_init_ioport(ISADevice *dev, uint16_t ioport)
void isa_register_ioport(ISADevice *dev, MemoryRegion *io, uint16_t start)
{
- memory_region_add_subregion(isabus->address_space_io, start, io);
+ memory_region_add_subregion(isa_address_space_io(dev), start, io);
isa_init_ioport(dev, start);
}
@@ -135,7 +141,7 @@ int isa_register_portio_list(ISADevice *dev,
isa_init_ioport(dev, start);
portio_list_init(piolist, OBJECT(dev), pio_start, opaque, name);
- portio_list_add(piolist, isabus->address_space_io, start);
+ portio_list_add(piolist, isa_address_space_io(dev), start);
return 0;
}
@@ -164,6 +170,11 @@ bool isa_realize_and_unref(ISADevice *dev, ISABus *bus, Error **errp)
return qdev_realize_and_unref(&dev->parent_obj, &bus->parent_obj, errp);
}
+ISABus *isa_bus_from_device(ISADevice *dev)
+{
+ return ISA_BUS(qdev_get_parent_bus(DEVICE(dev)));
+}
+
ISADevice *isa_vga_init(ISABus *bus)
{
vga_interface_created = true;
@@ -213,7 +224,6 @@ static const TypeInfo isa_device_type_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(ISADevice),
.abstract = true,
- .class_size = sizeof(ISADeviceClass),
.class_init = isa_device_class_init,
};
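
The isa-bus.c changes replace open-coded array accesses with an accessor
that asserts both the IRQ number bounds and that registration has happened.
A standalone mock of that register-once/assert-on-lookup pattern (the
mock_* names are hypothetical, not the QEMU API, which also involves
qemu_irq and the ISABus type):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MOCK_NUM_IRQS 16

    typedef void (*mock_irq)(int level);

    struct mock_bus {
        mock_irq *irqs_in;
    };

    static void mock_bus_register_input_irqs(struct mock_bus *bus,
                                             mock_irq *irqs_in)
    {
        bus->irqs_in = irqs_in;         /* registered once by the bridge */
    }

    static mock_irq mock_bus_get_irq(struct mock_bus *bus, unsigned irqnum)
    {
        assert(irqnum < MOCK_NUM_IRQS); /* catch bad IRQ numbers early */
        assert(bus->irqs_in);           /* catch use before registration */
        return bus->irqs_in[irqnum];
    }

    static void demo_handler(int level)
    {
        printf("IRQ raised to level %d\n", level);
    }

    int main(void)
    {
        static mock_irq table[MOCK_NUM_IRQS] = { [5] = demo_handler };
        struct mock_bus bus = { NULL };

        mock_bus_register_input_irqs(&bus, table);
        mock_bus_get_irq(&bus, 5)(1);
        return 0;
    }
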
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index 1fba3c210c..d8303d0322 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -40,8 +40,8 @@
#include "hw/irq.h"
#include "hw/isa/apm.h"
#include "hw/pci/pci.h"
-#include "hw/pci/pci_bridge.h"
-#include "hw/i386/ich9.h"
+#include "hw/southbridge/ich9.h"
+#include "hw/i386/pc.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ich9.h"
#include "hw/pci/pci_bus.h"
@@ -57,8 +57,6 @@
/*****************************************************************************/
/* ICH9 LPC PCI to ISA bridge */
-static void ich9_lpc_reset(DeviceState *qdev);
-
/* chipset configuration register
* to access chipset configuration registers, pci_[sg]et_{byte, word, long}
* are used.
@@ -259,7 +257,7 @@ static void ich9_lpc_update_apic(ICH9LPCState *lpc, int gsi)
qemu_set_irq(lpc->gsi[gsi], level);
}
-void ich9_lpc_set_irq(void *opaque, int pirq, int level)
+static void ich9_lpc_set_irq(void *opaque, int pirq, int level)
{
ICH9LPCState *lpc = opaque;
int pic_irq, pic_dis;
@@ -275,7 +273,7 @@ void ich9_lpc_set_irq(void *opaque, int pirq, int level)
/* return the pirq number (PIRQ[A-H]:0-7) corresponding to
* a given device irq pin.
*/
-int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx)
+static int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx)
{
BusState *bus = qdev_get_parent_bus(&pci_dev->qdev);
PCIBus *pci_bus = PCI_BUS(bus);
@@ -286,7 +284,7 @@ int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx)
return lpc->irr[PCI_SLOT(pci_dev->devfn)][intx];
}
-PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin)
+static PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin)
{
ICH9LPCState *lpc = opaque;
PCIINTxRoute route;
@@ -407,14 +405,13 @@ static void smi_features_ok_callback(void *opaque)
lpc->smi_features_ok = 1;
}
-void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled)
+static void ich9_lpc_pm_init(ICH9LPCState *lpc)
{
- ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pci);
qemu_irq sci_irq;
FWCfgState *fw_cfg = fw_cfg_find();
sci_irq = qemu_allocate_irq(ich9_set_sci, lpc, 0);
- ich9_pm_init(lpc_pci, &lpc->pm, smm_enabled, sci_irq);
+ ich9_pm_init(PCI_DEVICE(lpc), &lpc->pm, sci_irq);
if (lpc->smi_host_features && fw_cfg) {
uint64_t host_features_le;
@@ -440,8 +437,6 @@ void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled)
sizeof lpc->smi_features_ok,
true);
}
-
- ich9_lpc_reset(DEVICE(lpc));
}
/* APM */
@@ -680,6 +675,7 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
DeviceState *dev = DEVICE(d);
+ PCIBus *pci_bus = pci_get_bus(d);
ISABus *isa_bus;
if ((lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) &&
@@ -709,8 +705,6 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
memory_region_init_io(&lpc->rcrb_mem, OBJECT(d), &rcrb_mmio_ops, lpc,
"lpc-rcrb-mmio", ICH9_CC_SIZE);
- lpc->isa_bus = isa_bus;
-
ich9_cc_init(lpc);
apm_init(d, &lpc->apm, ich9_apm_ctrl_changed, lpc);
@@ -723,11 +717,17 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
ICH9_RST_CNT_IOPORT, &lpc->rst_cnt_mem,
1);
- qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, GSI_NUM_PINS);
+ qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, IOAPIC_NUM_PINS);
- isa_bus_irqs(isa_bus, lpc->gsi);
+ isa_bus_register_input_irqs(isa_bus, lpc->gsi);
i8257_dma_init(isa_bus, 0);
+
+ pci_bus_irqs(pci_bus, ich9_lpc_set_irq, d, ICH9_LPC_NB_PIRQS);
+ pci_bus_map_irqs(pci_bus, ich9_lpc_map_irq);
+ pci_bus_set_route_irq_fn(pci_bus, ich9_route_intx_pin_to_irq);
+
+ ich9_lpc_pm_init(lpc);
}
static bool ich9_rst_cnt_needed(void *opaque)
@@ -794,6 +794,7 @@ static const VMStateDescription vmstate_ich9_lpc = {
static Property ich9_lpc_properties[] = {
DEFINE_PROP_BOOL("noreboot", ICH9LPCState, pin_strap.spkr_hi, false),
DEFINE_PROP_BOOL("smm-compat", ICH9LPCState, pm.smm_compat, false),
+ DEFINE_PROP_BOOL("smm-enabled", ICH9LPCState, pm.smm_enabled, false),
DEFINE_PROP_BIT64("x-smi-broadcast", ICH9LPCState, smi_host_features,
ICH9_LPC_SMI_F_BROADCAST_BIT, true),
DEFINE_PROP_BIT64("x-smi-cpu-hotplug", ICH9LPCState, smi_host_features,
@@ -813,8 +814,7 @@ static void ich9_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
static void build_ich9_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
{
Aml *field;
- ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
- BusState *bus = BUS(s->isa_bus);
+ BusState *bus = qdev_get_child_bus(DEVICE(adev), "isa.0");
Aml *sb_scope = aml_scope("\\_SB");
/* ICH9 PCI to ISA irq remapping */
diff --git a/hw/isa/piix4.c b/hw/isa/piix4.c
index de60ceef73..e0b149f8eb 100644
--- a/hw/isa/piix4.c
+++ b/hw/isa/piix4.c
@@ -47,7 +47,7 @@ struct PIIX4State {
qemu_irq cpu_intr;
qemu_irq *isa;
- RTCState rtc;
+ MC146818RtcState rtc;
PCIIDEState ide;
UHCIState uhci;
PIIX4PMState pm;
@@ -212,7 +212,7 @@ static void piix4_realize(PCIDevice *dev, Error **errp)
s->isa = i8259_init(isa_bus, *i8259_out_irq);
/* initialize ISA irqs */
- isa_bus_irqs(isa_bus, s->isa);
+ isa_bus_register_input_irqs(isa_bus, s->isa);
/* initialize pit */
i8254_pit_init(isa_bus, 0x40, 0, NULL);
diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c
index 3f9bd0c04d..f4c40965cd 100644
--- a/hw/isa/vt82c686.c
+++ b/hw/isa/vt82c686.c
@@ -548,9 +548,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(ViaISAState, VIA_ISA)
struct ViaISAState {
PCIDevice dev;
qemu_irq cpu_intr;
- qemu_irq *isa_irqs;
+ qemu_irq *isa_irqs_in;
ViaSuperIOState via_sio;
- RTCState rtc;
+ MC146818RtcState rtc;
PCIIDEState ide;
UHCIState uhci[2];
ViaPMState pm;
@@ -595,13 +595,7 @@ static const TypeInfo via_isa_info = {
void via_isa_set_irq(PCIDevice *d, int n, int level)
{
ViaISAState *s = VIA_ISA(d);
- qemu_set_irq(s->isa_irqs[n], level);
-}
-
-static void via_isa_request_i8259_irq(void *opaque, int irq, int level)
-{
- ViaISAState *s = opaque;
- qemu_set_irq(s->cpu_intr, level);
+ qemu_set_irq(s->isa_irqs_in[n], level);
}
static void via_isa_realize(PCIDevice *d, Error **errp)
@@ -609,12 +603,10 @@ static void via_isa_realize(PCIDevice *d, Error **errp)
ViaISAState *s = VIA_ISA(d);
DeviceState *dev = DEVICE(d);
PCIBus *pci_bus = pci_get_bus(d);
- qemu_irq *isa_irq;
ISABus *isa_bus;
int i;
qdev_init_gpio_out(dev, &s->cpu_intr, 1);
- isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1);
isa_bus = isa_bus_new(dev, pci_address_space(d), pci_address_space_io(d),
errp);
@@ -622,8 +614,8 @@ static void via_isa_realize(PCIDevice *d, Error **errp)
return;
}
- s->isa_irqs = i8259_init(isa_bus, *isa_irq);
- isa_bus_irqs(isa_bus, s->isa_irqs);
+ s->isa_irqs_in = i8259_init(isa_bus, s->cpu_intr);
+ isa_bus_register_input_irqs(isa_bus, s->isa_irqs_in);
i8254_pit_init(isa_bus, 0x40, 0, NULL);
i8257_dma_init(isa_bus, 0);
diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c
index 6aefe9a61b..ca4426a92c 100644
--- a/hw/mips/jazz.c
+++ b/hw/mips/jazz.c
@@ -249,7 +249,7 @@ static void mips_jazz_init(MachineState *machine,
/* ISA devices */
i8259 = i8259_init(isa_bus, env->irq[4]);
- isa_bus_irqs(isa_bus, i8259);
+ isa_bus_register_input_irqs(isa_bus, i8259);
i8257_dma_init(isa_bus, 0);
pit = i8254_pit_init(isa_bus, 0x40, 0, NULL);
pcspk_init(isa_new(TYPE_PC_SPEAKER), isa_bus, pit);
diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c
index c8ac5633b2..4deb330471 100644
--- a/hw/misc/macio/gpio.c
+++ b/hw/misc/macio/gpio.c
@@ -28,6 +28,7 @@
#include "migration/vmstate.h"
#include "hw/misc/macio/macio.h"
#include "hw/misc/macio/gpio.h"
+#include "hw/irq.h"
#include "hw/nmi.h"
#include "qemu/log.h"
#include "qemu/module.h"
diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c
index 0f1852f671..49008e4938 100644
--- a/hw/nubus/nubus-device.c
+++ b/hw/nubus/nubus-device.c
@@ -80,6 +80,7 @@ static void nubus_device_realize(DeviceState *dev, Error **errp)
&error_abort);
ret = load_image_mr(path, &nd->decl_rom);
g_free(path);
+ g_free(name);
if (ret < 0) {
error_setg(errp, "could not load romfile \"%s\"", nd->romfile);
return;
diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c
index f3b4a14611..0e83cd11b2 100644
--- a/hw/pci-bridge/i82801b11.c
+++ b/hw/pci-bridge/i82801b11.c
@@ -45,7 +45,7 @@
#include "hw/pci/pci_bridge.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-#include "hw/i386/ich9.h"
+#include "hw/southbridge/ich9.h"
/*****************************************************************************/
/* ICH9 DMI-to-PCI bridge */
diff --git a/hw/pci/msi.c b/hw/pci/msi.c
index 1cadf150bc..041b0bdbec 100644
--- a/hw/pci/msi.c
+++ b/hw/pci/msi.c
@@ -24,6 +24,8 @@
#include "qemu/range.h"
#include "qapi/error.h"
+#include "hw/i386/kvm/xen_evtchn.h"
+
/* PCI_MSI_ADDRESS_LO */
#define PCI_MSI_ADDRESS_LO_MASK (~0x3)
@@ -414,6 +416,15 @@ void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len)
fprintf(stderr, "\n");
#endif
+ if (xen_mode == XEN_EMULATE) {
+ for (vector = 0; vector < msi_nr_vectors(flags); vector++) {
+ MSIMessage msg = msi_prepare_message(dev, vector);
+
+ xen_evtchn_snoop_msi(dev, false, vector, msg.address, msg.data,
+ msi_is_masked(dev, vector));
+ }
+ }
+
if (!(flags & PCI_MSI_FLAGS_ENABLE)) {
return;
}
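
The snoop loop added above walks every vector the function may have
enabled; msi_nr_vectors() derives that count from the Multiple Message
Enable field of the MSI Message Control register. A standalone sketch of
that arithmetic, using the PCI-spec field layout rather than QEMU's helper
itself:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_MSI_FLAGS_QSIZE 0x0070   /* Multiple Message Enable, bits 6:4 */

    static unsigned msi_vectors_from_flags(uint16_t flags)
    {
        /* the field encodes log2 of the enabled vector count */
        return 1u << ((flags & PCI_MSI_FLAGS_QSIZE) >> 4);
    }

    int main(void)
    {
        uint16_t flags = 0x0020;         /* MME = 2 -> 4 vectors */

        printf("%u vectors enabled\n", msi_vectors_from_flags(flags));
        return 0;
    }
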
diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index 9e70fcd6fa..ab8869d9d0 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -26,6 +26,8 @@
#include "qapi/error.h"
#include "trace.h"
+#include "hw/i386/kvm/xen_evtchn.h"
+
/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
@@ -124,6 +126,13 @@ static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
bool is_masked = msix_is_masked(dev, vector);
+ if (xen_mode == XEN_EMULATE) {
+ MSIMessage msg = msix_prepare_message(dev, vector);
+
+ xen_evtchn_snoop_msi(dev, true, vector, msg.address, msg.data,
+ is_masked);
+ }
+
if (is_masked == was_masked) {
return;
}
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index cc51f98593..10c980b9f5 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -49,6 +49,9 @@
#include "qemu/cutils.h"
#include "pci-internal.h"
+#include "hw/xen/xen.h"
+#include "hw/i386/kvm/xen_evtchn.h"
+
//#define DEBUG_PCI
#ifdef DEBUG_PCI
# define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
@@ -319,6 +322,17 @@ static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
MemTxAttrs attrs = {};
+ /*
+ * Xen uses the high bits of the address to contain some of the bits
+ * of the PIRQ#. Therefore we can't just send the write cycle and
+ * trust that it's caught by the APIC at 0xfee00000 because the
+ * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
+ * So we intercept the delivery here instead of in kvm_send_msi().
+ */
+ if (xen_mode == XEN_EMULATE &&
+ xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
+ return;
+ }
attrs.requester_id = pci_requester_id(dev);
address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
attrs, NULL);
@@ -988,6 +1002,9 @@ static void do_pci_unregister_device(PCIDevice *pci_dev)
pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
pci_config_free(pci_dev);
+ if (xen_mode == XEN_EMULATE) {
+ xen_evtchn_remove_pci_device(pci_dev);
+ }
if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
memory_region_del_subregion(&pci_dev->bus_master_container_region,
&pci_dev->bus_master_enable_region);
@@ -1648,7 +1665,7 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev,
* 9.1: Interrupt routing. Table 9-1
*
* the PCI Express Base Specification, Revision 2.1
- * 2.2.8.1: INTx interrutp signaling - Rules
+ * 2.2.8.1: INTx interrupt signaling - Rules
* the Implementation Note
* Table 2-20
*/
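
The comment in the pci_msi_trigger() hunk above explains why the MSI write
has to be intercepted: under Xen emulation the address can encode a PIRQ
number rather than a plain APIC target. The sketch below decodes such an
address in a way that is consistent with the PIRQ#4166 example in that
comment; the exact bit layout is an assumption drawn from that example,
not a quote of xen_evtchn_deliver_pirq_msi():

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* low byte of the PIRQ in address bits 12-19 (the MSI destination ID),
     * remaining bits in the extended-destination field above bit 32 */
    static uint32_t pirq_from_msi_addr(uint64_t addr)
    {
        uint32_t pirq = (uint32_t)(addr >> 12) & 0xff;

        pirq |= (uint32_t)(addr >> 32) & 0xffffff00;
        return pirq;
    }

    int main(void)
    {
        uint64_t addr = 0x1000fee46000ULL;

        /* prints "PIRQ 4166" for the address quoted in the comment */
        printf("PIRQ %" PRIu32 "\n", pirq_from_msi_addr(addr));
        return 0;
    }
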
diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c
index 71143b7692..01f44c19eb 100644
--- a/hw/ppc/pnv_lpc.c
+++ b/hw/ppc/pnv_lpc.c
@@ -837,7 +837,7 @@ ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp)
irqs = qemu_allocate_irqs(handler, lpc, ISA_NUM_IRQS);
- isa_bus_irqs(isa_bus, irqs);
+ isa_bus_register_input_irqs(isa_bus, irqs);
return isa_bus;
}
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index fcbe4c5837..d00280c0f8 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -212,14 +212,13 @@ static int PPC_NVRAM_set_params (Nvram *nvram, uint16_t NVRAM_size,
static int prep_set_cmos_checksum(DeviceState *dev, void *opaque)
{
uint16_t checksum = *(uint16_t *)opaque;
- ISADevice *rtc;
if (object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC)) {
- rtc = ISA_DEVICE(dev);
- rtc_set_memory(rtc, 0x2e, checksum & 0xff);
- rtc_set_memory(rtc, 0x3e, checksum & 0xff);
- rtc_set_memory(rtc, 0x2f, checksum >> 8);
- rtc_set_memory(rtc, 0x3f, checksum >> 8);
+ MC146818RtcState *rtc = MC146818_RTC(dev);
+ mc146818rtc_set_cmos_data(rtc, 0x2e, checksum & 0xff);
+ mc146818rtc_set_cmos_data(rtc, 0x3e, checksum & 0xff);
+ mc146818rtc_set_cmos_data(rtc, 0x2f, checksum >> 8);
+ mc146818rtc_set_cmos_data(rtc, 0x3f, checksum >> 8);
object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(rtc),
"date");
diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c
index 4a22ce3761..cf065aae0e 100644
--- a/hw/ppc/sam460ex.c
+++ b/hw/ppc/sam460ex.c
@@ -389,8 +389,8 @@ static void sam460ex_init(MachineState *machine)
/* MAL */
dev = qdev_new(TYPE_PPC4xx_MAL);
- qdev_prop_set_uint32(dev, "txc-num", 4);
- qdev_prop_set_uint32(dev, "rxc-num", 16);
+ qdev_prop_set_uint8(dev, "txc-num", 4);
+ qdev_prop_set_uint8(dev, "rxc-num", 16);
ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(dev), cpu, &error_fatal);
object_unref(OBJECT(dev));
sbdev = SYS_BUS_DEVICE(dev);
diff --git a/hw/rtc/m48t59-isa.c b/hw/rtc/m48t59-isa.c
index e61f7ec370..5bb46f2383 100644
--- a/hw/rtc/m48t59-isa.c
+++ b/hw/rtc/m48t59-isa.c
@@ -47,7 +47,7 @@ struct M48txxISAState {
};
struct M48txxISADeviceClass {
- ISADeviceClass parent_class;
+ DeviceClass parent_class;
M48txxInfo info;
};
diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c
index ba612a151d..c27c362db9 100644
--- a/hw/rtc/mc146818rtc.c
+++ b/hw/rtc/mc146818rtc.c
@@ -71,19 +71,19 @@
#define RTC_ISA_BASE 0x70
-static void rtc_set_time(RTCState *s);
-static void rtc_update_time(RTCState *s);
-static void rtc_set_cmos(RTCState *s, const struct tm *tm);
-static inline int rtc_from_bcd(RTCState *s, int a);
-static uint64_t get_next_alarm(RTCState *s);
+static void rtc_set_time(MC146818RtcState *s);
+static void rtc_update_time(MC146818RtcState *s);
+static void rtc_set_cmos(MC146818RtcState *s, const struct tm *tm);
+static inline int rtc_from_bcd(MC146818RtcState *s, int a);
+static uint64_t get_next_alarm(MC146818RtcState *s);
-static inline bool rtc_running(RTCState *s)
+static inline bool rtc_running(MC146818RtcState *s)
{
return (!(s->cmos_data[RTC_REG_B] & REG_B_SET) &&
(s->cmos_data[RTC_REG_A] & 0x70) <= 0x20);
}
-static uint64_t get_guest_rtc_ns(RTCState *s)
+static uint64_t get_guest_rtc_ns(MC146818RtcState *s)
{
uint64_t guest_clock = qemu_clock_get_ns(rtc_clock);
@@ -91,7 +91,7 @@ static uint64_t get_guest_rtc_ns(RTCState *s)
guest_clock - s->last_update + s->offset;
}
-static void rtc_coalesced_timer_update(RTCState *s)
+static void rtc_coalesced_timer_update(MC146818RtcState *s)
{
if (s->irq_coalesced == 0) {
timer_del(s->coalesced_timer);
@@ -104,19 +104,19 @@ static void rtc_coalesced_timer_update(RTCState *s)
}
}
-static QLIST_HEAD(, RTCState) rtc_devices =
+static QLIST_HEAD(, MC146818RtcState) rtc_devices =
QLIST_HEAD_INITIALIZER(rtc_devices);
void qmp_rtc_reset_reinjection(Error **errp)
{
- RTCState *s;
+ MC146818RtcState *s;
QLIST_FOREACH(s, &rtc_devices, link) {
s->irq_coalesced = 0;
}
}
-static bool rtc_policy_slew_deliver_irq(RTCState *s)
+static bool rtc_policy_slew_deliver_irq(MC146818RtcState *s)
{
kvm_reset_irq_delivered();
qemu_irq_raise(s->irq);
@@ -125,7 +125,7 @@ static bool rtc_policy_slew_deliver_irq(RTCState *s)
static void rtc_coalesced_timer(void *opaque)
{
- RTCState *s = opaque;
+ MC146818RtcState *s = opaque;
if (s->irq_coalesced != 0) {
s->cmos_data[RTC_REG_C] |= 0xc0;
@@ -140,7 +140,7 @@ static void rtc_coalesced_timer(void *opaque)
rtc_coalesced_timer_update(s);
}
-static uint32_t rtc_periodic_clock_ticks(RTCState *s)
+static uint32_t rtc_periodic_clock_ticks(MC146818RtcState *s)
{
int period_code;
@@ -157,8 +157,8 @@ static uint32_t rtc_periodic_clock_ticks(RTCState *s)
* handle periodic timer. @old_period indicates the periodic timer update
* is just due to period adjustment.
*/
-static void
-periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period, bool period_change)
+static void periodic_timer_update(MC146818RtcState *s, int64_t current_time,
+ uint32_t old_period, bool period_change)
{
uint32_t period;
int64_t cur_clock, next_irq_clock, lost_clock = 0;
@@ -234,7 +234,7 @@ periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period, bo
static void rtc_periodic_timer(void *opaque)
{
- RTCState *s = opaque;
+ MC146818RtcState *s = opaque;
periodic_timer_update(s, s->next_periodic_time, s->period, false);
s->cmos_data[RTC_REG_C] |= REG_C_PF;
@@ -255,7 +255,7 @@ static void rtc_periodic_timer(void *opaque)
}
/* handle update-ended timer */
-static void check_update_timer(RTCState *s)
+static void check_update_timer(MC146818RtcState *s)
{
uint64_t next_update_time;
uint64_t guest_nsec;
@@ -306,7 +306,7 @@ static void check_update_timer(RTCState *s)
}
}
-static inline uint8_t convert_hour(RTCState *s, uint8_t hour)
+static inline uint8_t convert_hour(MC146818RtcState *s, uint8_t hour)
{
if (!(s->cmos_data[RTC_REG_B] & REG_B_24H)) {
hour %= 12;
@@ -317,7 +317,7 @@ static inline uint8_t convert_hour(RTCState *s, uint8_t hour)
return hour;
}
-static uint64_t get_next_alarm(RTCState *s)
+static uint64_t get_next_alarm(MC146818RtcState *s)
{
int32_t alarm_sec, alarm_min, alarm_hour, cur_hour, cur_min, cur_sec;
int32_t hour, min, sec;
@@ -410,7 +410,7 @@ static uint64_t get_next_alarm(RTCState *s)
static void rtc_update_timer(void *opaque)
{
- RTCState *s = opaque;
+ MC146818RtcState *s = opaque;
int32_t irqs = REG_C_UF;
int32_t new_irqs;
@@ -439,7 +439,7 @@ static void rtc_update_timer(void *opaque)
static void cmos_ioport_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
- RTCState *s = opaque;
+ MC146818RtcState *s = opaque;
uint32_t old_period;
bool update_periodic_timer;
@@ -557,7 +557,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
}
}
-static inline int rtc_to_bcd(RTCState *s, int a)
+static inline int rtc_to_bcd(MC146818RtcState *s, int a)
{
if (s->cmos_data[RTC_REG_B] & REG_B_DM) {
return a;
@@ -566,7 +566,7 @@ static inline int rtc_to_bcd(RTCState *s, int a)
}
}
-static inline int rtc_from_bcd(RTCState *s, int a)
+static inline int rtc_from_bcd(MC146818RtcState *s, int a)
{
if ((a & 0xc0) == 0xc0) {
return -1;
@@ -578,7 +578,7 @@ static inline int rtc_from_bcd(RTCState *s, int a)
}
}
-static void rtc_get_time(RTCState *s, struct tm *tm)
+static void rtc_get_time(MC146818RtcState *s, struct tm *tm)
{
tm->tm_sec = rtc_from_bcd(s, s->cmos_data[RTC_SECONDS]);
tm->tm_min = rtc_from_bcd(s, s->cmos_data[RTC_MINUTES]);
@@ -597,7 +597,7 @@ static void rtc_get_time(RTCState *s, struct tm *tm)
rtc_from_bcd(s, s->cmos_data[RTC_CENTURY]) * 100 - 1900;
}
-static void rtc_set_time(RTCState *s)
+static void rtc_set_time(MC146818RtcState *s)
{
struct tm tm;
g_autofree const char *qom_path = object_get_canonical_path(OBJECT(s));
@@ -609,7 +609,7 @@ static void rtc_set_time(RTCState *s)
qapi_event_send_rtc_change(qemu_timedate_diff(&tm), qom_path);
}
-static void rtc_set_cmos(RTCState *s, const struct tm *tm)
+static void rtc_set_cmos(MC146818RtcState *s, const struct tm *tm)
{
int year;
@@ -633,7 +633,7 @@ static void rtc_set_cmos(RTCState *s, const struct tm *tm)
s->cmos_data[RTC_CENTURY] = rtc_to_bcd(s, year / 100);
}
-static void rtc_update_time(RTCState *s)
+static void rtc_update_time(MC146818RtcState *s)
{
struct tm ret;
time_t guest_sec;
@@ -649,7 +649,7 @@ static void rtc_update_time(RTCState *s)
}
}
-static int update_in_progress(RTCState *s)
+static int update_in_progress(MC146818RtcState *s)
{
int64_t guest_nsec;
@@ -678,7 +678,7 @@ static int update_in_progress(RTCState *s)
static uint64_t cmos_ioport_read(void *opaque, hwaddr addr,
unsigned size)
{
- RTCState *s = opaque;
+ MC146818RtcState *s = opaque;
int ret;
if ((addr & 1) == 0) {
return 0xff;
@@ -739,23 +739,21 @@ static uint64_t cmos_ioport_read(void *opaque, hwaddr addr,
}
}
-void rtc_set_memory(ISADevice *dev, int addr, int val)
+void mc146818rtc_set_cmos_data(MC146818RtcState *s, int addr, int val)
{
- RTCState *s = MC146818_RTC(dev);
if (addr >= 0 && addr <= 127)
s->cmos_data[addr] = val;
}
-int rtc_get_memory(ISADevice *dev, int addr)
+int mc146818rtc_get_cmos_data(MC146818RtcState *s, int addr)
{
- RTCState *s = MC146818_RTC(dev);
assert(addr >= 0 && addr <= 127);
return s->cmos_data[addr];
}
static void rtc_set_date_from_host(ISADevice *dev)
{
- RTCState *s = MC146818_RTC(dev);
+ MC146818RtcState *s = MC146818_RTC(dev);
struct tm tm;
qemu_get_timedate(&tm, 0);
@@ -770,7 +768,7 @@ static void rtc_set_date_from_host(ISADevice *dev)
static int rtc_pre_save(void *opaque)
{
- RTCState *s = opaque;
+ MC146818RtcState *s = opaque;
rtc_update_time(s);
@@ -779,7 +777,7 @@ static int rtc_pre_save(void *opaque)
static int rtc_post_load(void *opaque, int version_id)
{
- RTCState *s = opaque;
+ MC146818RtcState *s = opaque;
if (version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME) {
rtc_set_time(s);
@@ -810,7 +808,7 @@ static int rtc_post_load(void *opaque, int version_id)
static bool rtc_irq_reinject_on_ack_count_needed(void *opaque)
{
- RTCState *s = (RTCState *)opaque;
+ MC146818RtcState *s = (MC146818RtcState *)opaque;
return s->irq_reinject_on_ack_count != 0;
}
@@ -820,7 +818,7 @@ static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = {
.minimum_version_id = 1,
.needed = rtc_irq_reinject_on_ack_count_needed,
.fields = (VMStateField[]) {
- VMSTATE_UINT16(irq_reinject_on_ack_count, RTCState),
+ VMSTATE_UINT16(irq_reinject_on_ack_count, MC146818RtcState),
VMSTATE_END_OF_LIST()
}
};
@@ -832,19 +830,19 @@ static const VMStateDescription vmstate_rtc = {
.pre_save = rtc_pre_save,
.post_load = rtc_post_load,
.fields = (VMStateField[]) {
- VMSTATE_BUFFER(cmos_data, RTCState),
- VMSTATE_UINT8(cmos_index, RTCState),
+ VMSTATE_BUFFER(cmos_data, MC146818RtcState),
+ VMSTATE_UINT8(cmos_index, MC146818RtcState),
VMSTATE_UNUSED(7*4),
- VMSTATE_TIMER_PTR(periodic_timer, RTCState),
- VMSTATE_INT64(next_periodic_time, RTCState),
+ VMSTATE_TIMER_PTR(periodic_timer, MC146818RtcState),
+ VMSTATE_INT64(next_periodic_time, MC146818RtcState),
VMSTATE_UNUSED(3*8),
- VMSTATE_UINT32_V(irq_coalesced, RTCState, 2),
- VMSTATE_UINT32_V(period, RTCState, 2),
- VMSTATE_UINT64_V(base_rtc, RTCState, 3),
- VMSTATE_UINT64_V(last_update, RTCState, 3),
- VMSTATE_INT64_V(offset, RTCState, 3),
- VMSTATE_TIMER_PTR_V(update_timer, RTCState, 3),
- VMSTATE_UINT64_V(next_alarm_time, RTCState, 3),
+ VMSTATE_UINT32_V(irq_coalesced, MC146818RtcState, 2),
+ VMSTATE_UINT32_V(period, MC146818RtcState, 2),
+ VMSTATE_UINT64_V(base_rtc, MC146818RtcState, 3),
+ VMSTATE_UINT64_V(last_update, MC146818RtcState, 3),
+ VMSTATE_INT64_V(offset, MC146818RtcState, 3),
+ VMSTATE_TIMER_PTR_V(update_timer, MC146818RtcState, 3),
+ VMSTATE_UINT64_V(next_alarm_time, MC146818RtcState, 3),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription*[]) {
@@ -857,8 +855,9 @@ static const VMStateDescription vmstate_rtc = {
BIOS will read it and start S3 resume at POST Entry */
static void rtc_notify_suspend(Notifier *notifier, void *data)
{
- RTCState *s = container_of(notifier, RTCState, suspend_notifier);
- rtc_set_memory(ISA_DEVICE(s), 0xF, 0xFE);
+ MC146818RtcState *s = container_of(notifier, MC146818RtcState,
+ suspend_notifier);
+ mc146818rtc_set_cmos_data(s, 0xF, 0xFE);
}
static const MemoryRegionOps cmos_ops = {
@@ -873,7 +872,7 @@ static const MemoryRegionOps cmos_ops = {
static void rtc_get_date(Object *obj, struct tm *current_tm, Error **errp)
{
- RTCState *s = MC146818_RTC(obj);
+ MC146818RtcState *s = MC146818_RTC(obj);
rtc_update_time(s);
rtc_get_time(s, current_tm);
@@ -882,7 +881,7 @@ static void rtc_get_date(Object *obj, struct tm *current_tm, Error **errp)
static void rtc_realizefn(DeviceState *dev, Error **errp)
{
ISADevice *isadev = ISA_DEVICE(dev);
- RTCState *s = MC146818_RTC(dev);
+ MC146818RtcState *s = MC146818_RTC(dev);
s->cmos_data[RTC_REG_A] = 0x26;
s->cmos_data[RTC_REG_B] = 0x02;
@@ -945,11 +944,12 @@ static void rtc_realizefn(DeviceState *dev, Error **errp)
QLIST_INSERT_HEAD(&rtc_devices, s, link);
}
-ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq)
+MC146818RtcState *mc146818_rtc_init(ISABus *bus, int base_year,
+ qemu_irq intercept_irq)
{
DeviceState *dev;
ISADevice *isadev;
- RTCState *s;
+ MC146818RtcState *s;
isadev = isa_new(TYPE_MC146818_RTC);
dev = DEVICE(isadev);
@@ -965,21 +965,21 @@ ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq)
object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(isadev),
"date");
- return isadev;
+ return s;
}
static Property mc146818rtc_properties[] = {
- DEFINE_PROP_INT32("base_year", RTCState, base_year, 1980),
- DEFINE_PROP_UINT16("iobase", RTCState, io_base, RTC_ISA_BASE),
- DEFINE_PROP_UINT8("irq", RTCState, isairq, RTC_ISA_IRQ),
- DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", RTCState,
+ DEFINE_PROP_INT32("base_year", MC146818RtcState, base_year, 1980),
+ DEFINE_PROP_UINT16("iobase", MC146818RtcState, io_base, RTC_ISA_BASE),
+ DEFINE_PROP_UINT8("irq", MC146818RtcState, isairq, RTC_ISA_IRQ),
+ DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", MC146818RtcState,
lost_tick_policy, LOST_TICK_POLICY_DISCARD),
DEFINE_PROP_END_OF_LIST(),
};
static void rtc_reset_enter(Object *obj, ResetType type)
{
- RTCState *s = MC146818_RTC(obj);
+ MC146818RtcState *s = MC146818_RTC(obj);
/* Reason: VM do suspend self will set 0xfe
* Reset any values other than 0xfe(Guest suspend case) */
@@ -1000,14 +1000,14 @@ static void rtc_reset_enter(Object *obj, ResetType type)
static void rtc_reset_hold(Object *obj)
{
- RTCState *s = MC146818_RTC(obj);
+ MC146818RtcState *s = MC146818_RTC(obj);
qemu_irq_lower(s->irq);
}
static void rtc_build_aml(AcpiDevAmlIf *adev, Aml *scope)
{
- RTCState *s = MC146818_RTC(adev);
+ MC146818RtcState *s = MC146818_RTC(adev);
Aml *dev;
Aml *crs;
@@ -1045,7 +1045,7 @@ static void rtc_class_initfn(ObjectClass *klass, void *data)
static const TypeInfo mc146818rtc_info = {
.name = TYPE_MC146818_RTC,
.parent = TYPE_ISA_DEVICE,
- .instance_size = sizeof(RTCState),
+ .instance_size = sizeof(MC146818RtcState),
.class_init = rtc_class_initfn,
.interfaces = (InterfaceInfo[]) {
{ TYPE_ACPI_DEV_AML_IF },
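
Several of the helpers renamed above, rtc_to_bcd() and rtc_from_bcd(),
implement the CMOS clock's BCD mode, in which each decimal digit occupies
one nibble. A standalone refresher of that conversion (plain C, not the
QEMU helpers, which additionally consult the REG_B_DM mode bit):

    #include <stdio.h>

    static int to_bcd(int a)   { return ((a / 10) << 4) | (a % 10); }
    static int from_bcd(int a) { return ((a >> 4) * 10) + (a & 0x0f); }

    int main(void)
    {
        int sec = 59;
        int bcd = to_bcd(sec);

        /* prints: 59 -> BCD 0x59 -> 59 */
        printf("%d -> BCD 0x%02x -> %d\n", sec, bcd, from_bcd(bcd));
        return 0;
    }
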
diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c
index 39fc4f19d9..826a0a31b5 100644
--- a/hw/sh4/r2d.c
+++ b/hw/sh4/r2d.c
@@ -38,7 +38,7 @@
#include "hw/qdev-properties.h"
#include "net/net.h"
#include "sh7750_regs.h"
-#include "hw/ide.h"
+#include "hw/ide/mmio.h"
#include "hw/irq.h"
#include "hw/loader.h"
#include "hw/usb.h"
@@ -274,7 +274,7 @@ static void r2d_init(MachineState *machine)
dev = qdev_new("sysbus-sm501");
busdev = SYS_BUS_DEVICE(dev);
qdev_prop_set_uint32(dev, "vram-size", SM501_VRAM_SIZE);
- qdev_prop_set_uint32(dev, "base", 0x10000000);
+ qdev_prop_set_uint64(dev, "dma-offset", 0x10000000);
qdev_prop_set_chr(dev, "chardev", serial_hd(2));
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, 0x10000000);
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index 387181ff77..a25e951f9d 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -28,6 +28,7 @@
#include "qapi/error.h"
#include "qemu/datadir.h"
#include "cpu.h"
+#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
@@ -84,7 +85,8 @@ struct EbusState {
PCIDevice parent_obj;
ISABus *isa_bus;
- qemu_irq isa_bus_irqs[ISA_NUM_IRQS];
+ qemu_irq *isa_irqs_in;
+ qemu_irq isa_irqs_out[ISA_NUM_IRQS];
uint64_t console_serial_base;
MemoryRegion bar0;
MemoryRegion bar1;
@@ -287,7 +289,7 @@ static const TypeInfo power_info = {
static void ebus_isa_irq_handler(void *opaque, int n, int level)
{
EbusState *s = EBUS(opaque);
- qemu_irq irq = s->isa_bus_irqs[n];
+ qemu_irq irq = s->isa_irqs_out[n];
/* Pass ISA bus IRQs onto their gpio equivalent */
trace_ebus_isa_irq_handler(n, level);
@@ -303,7 +305,6 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp)
ISADevice *isa_dev;
SysBusDevice *sbd;
DeviceState *dev;
- qemu_irq *isa_irq;
DriveInfo *fd[MAX_FD];
int i;
@@ -315,9 +316,9 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp)
}
/* ISA bus */
- isa_irq = qemu_allocate_irqs(ebus_isa_irq_handler, s, ISA_NUM_IRQS);
- isa_bus_irqs(s->isa_bus, isa_irq);
- qdev_init_gpio_out_named(DEVICE(s), s->isa_bus_irqs, "isa-irq",
+ s->isa_irqs_in = qemu_allocate_irqs(ebus_isa_irq_handler, s, ISA_NUM_IRQS);
+ isa_bus_register_input_irqs(s->isa_bus, s->isa_irqs_in);
+ qdev_init_gpio_out_named(DEVICE(s), s->isa_irqs_out, "isa-irq",
ISA_NUM_IRQS);
/* Serial ports */
diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c
index 9520471be2..214d6a0501 100644
--- a/hw/timer/hpet.c
+++ b/hw/timer/hpet.c
@@ -30,6 +30,7 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
+#include "hw/qdev-properties.h"
#include "hw/timer/hpet.h"
#include "hw/sysbus.h"
#include "hw/rtc/mc146818rtc.h"
diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c
index 28164d89be..be0a4fc3bc 100644
--- a/hw/usb/dev-smartcard-reader.c
+++ b/hw/usb/dev-smartcard-reader.c
@@ -278,7 +278,9 @@ typedef struct BulkIn {
struct CCIDBus {
BusState qbus;
};
-typedef struct CCIDBus CCIDBus;
+
+#define TYPE_CCID_BUS "ccid-bus"
+OBJECT_DECLARE_SIMPLE_TYPE(CCIDBus, CCID_BUS)
/*
* powered - defaults to true, changed by PowerOn/PowerOff messages
@@ -1174,9 +1176,6 @@ static Property ccid_props[] = {
DEFINE_PROP_END_OF_LIST(),
};
-#define TYPE_CCID_BUS "ccid-bus"
-OBJECT_DECLARE_SIMPLE_TYPE(CCIDBus, CCID_BUS)
-
static const TypeInfo ccid_bus_info = {
.name = TYPE_CCID_BUS,
.parent = TYPE_BUS,
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index 9d68036d23..6f8b543243 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -23,7 +23,7 @@
* o Disable timers when nothing needs to be done, or remove timer usage
* all together.
* o BIOS work to boot from USB storage
-*/
+ */
#include "qemu/osdep.h"
#include "hw/irq.h"
@@ -39,7 +39,7 @@
#include "hcd-ohci.h"
/* This causes frames to occur 1000x slower */
-//#define OHCI_TIME_WARP 1
+/*#define OHCI_TIME_WARP 1*/
#define ED_LINK_LIMIT 32
@@ -58,48 +58,48 @@ struct ohci_hcca {
#define ED_WBACK_OFFSET offsetof(struct ohci_ed, head)
#define ED_WBACK_SIZE 4
-/* Bitfields for the first word of an Endpoint Desciptor. */
+/* Bitfields for the first word of an Endpoint Descriptor. */
#define OHCI_ED_FA_SHIFT 0
-#define OHCI_ED_FA_MASK (0x7f<<OHCI_ED_FA_SHIFT)
+#define OHCI_ED_FA_MASK (0x7f << OHCI_ED_FA_SHIFT)
#define OHCI_ED_EN_SHIFT 7
-#define OHCI_ED_EN_MASK (0xf<<OHCI_ED_EN_SHIFT)
+#define OHCI_ED_EN_MASK (0xf << OHCI_ED_EN_SHIFT)
#define OHCI_ED_D_SHIFT 11
-#define OHCI_ED_D_MASK (3<<OHCI_ED_D_SHIFT)
-#define OHCI_ED_S (1<<13)
-#define OHCI_ED_K (1<<14)
-#define OHCI_ED_F (1<<15)
+#define OHCI_ED_D_MASK (3 << OHCI_ED_D_SHIFT)
+#define OHCI_ED_S (1 << 13)
+#define OHCI_ED_K (1 << 14)
+#define OHCI_ED_F (1 << 15)
#define OHCI_ED_MPS_SHIFT 16
-#define OHCI_ED_MPS_MASK (0x7ff<<OHCI_ED_MPS_SHIFT)
+#define OHCI_ED_MPS_MASK (0x7ff << OHCI_ED_MPS_SHIFT)
-/* Flags in the head field of an Endpoint Desciptor. */
+/* Flags in the head field of an Endpoint Descriptor. */
#define OHCI_ED_H 1
#define OHCI_ED_C 2
-/* Bitfields for the first word of a Transfer Desciptor. */
-#define OHCI_TD_R (1<<18)
+/* Bitfields for the first word of a Transfer Descriptor. */
+#define OHCI_TD_R (1 << 18)
#define OHCI_TD_DP_SHIFT 19
-#define OHCI_TD_DP_MASK (3<<OHCI_TD_DP_SHIFT)
+#define OHCI_TD_DP_MASK (3 << OHCI_TD_DP_SHIFT)
#define OHCI_TD_DI_SHIFT 21
-#define OHCI_TD_DI_MASK (7<<OHCI_TD_DI_SHIFT)
-#define OHCI_TD_T0 (1<<24)
-#define OHCI_TD_T1 (1<<25)
+#define OHCI_TD_DI_MASK (7 << OHCI_TD_DI_SHIFT)
+#define OHCI_TD_T0 (1 << 24)
+#define OHCI_TD_T1 (1 << 25)
#define OHCI_TD_EC_SHIFT 26
-#define OHCI_TD_EC_MASK (3<<OHCI_TD_EC_SHIFT)
+#define OHCI_TD_EC_MASK (3 << OHCI_TD_EC_SHIFT)
#define OHCI_TD_CC_SHIFT 28
-#define OHCI_TD_CC_MASK (0xf<<OHCI_TD_CC_SHIFT)
+#define OHCI_TD_CC_MASK (0xf << OHCI_TD_CC_SHIFT)
-/* Bitfields for the first word of an Isochronous Transfer Desciptor. */
-/* CC & DI - same as in the General Transfer Desciptor */
+/* Bitfields for the first word of an Isochronous Transfer Descriptor. */
+/* CC & DI - same as in the General Transfer Descriptor */
#define OHCI_TD_SF_SHIFT 0
-#define OHCI_TD_SF_MASK (0xffff<<OHCI_TD_SF_SHIFT)
+#define OHCI_TD_SF_MASK (0xffff << OHCI_TD_SF_SHIFT)
#define OHCI_TD_FC_SHIFT 24
-#define OHCI_TD_FC_MASK (7<<OHCI_TD_FC_SHIFT)
+#define OHCI_TD_FC_MASK (7 << OHCI_TD_FC_SHIFT)
-/* Isochronous Transfer Desciptor - Offset / PacketStatusWord */
+/* Isochronous Transfer Descriptor - Offset / PacketStatusWord */
#define OHCI_TD_PSW_CC_SHIFT 12
-#define OHCI_TD_PSW_CC_MASK (0xf<<OHCI_TD_PSW_CC_SHIFT)
+#define OHCI_TD_PSW_CC_MASK (0xf << OHCI_TD_PSW_CC_SHIFT)
#define OHCI_TD_PSW_SIZE_SHIFT 0
-#define OHCI_TD_PSW_SIZE_MASK (0xfff<<OHCI_TD_PSW_SIZE_SHIFT)
+#define OHCI_TD_PSW_SIZE_MASK (0xfff << OHCI_TD_PSW_SIZE_SHIFT)
#define OHCI_PAGE_MASK 0xfffff000
#define OHCI_OFFSET_MASK 0xfff
@@ -112,7 +112,7 @@ struct ohci_hcca {
#define OHCI_SET_BM(val, field, newval) do { \
val &= ~OHCI_##field##_MASK; \
val |= ((newval) << OHCI_##field##_SHIFT) & OHCI_##field##_MASK; \
- } while(0)
+ } while (0)
/* endpoint descriptor */
struct ohci_ed {
@@ -142,35 +142,35 @@ struct ohci_iso_td {
#define USB_HZ 12000000
/* OHCI Local stuff */
-#define OHCI_CTL_CBSR ((1<<0)|(1<<1))
-#define OHCI_CTL_PLE (1<<2)
-#define OHCI_CTL_IE (1<<3)
-#define OHCI_CTL_CLE (1<<4)
-#define OHCI_CTL_BLE (1<<5)
-#define OHCI_CTL_HCFS ((1<<6)|(1<<7))
+#define OHCI_CTL_CBSR ((1 << 0) | (1 << 1))
+#define OHCI_CTL_PLE (1 << 2)
+#define OHCI_CTL_IE (1 << 3)
+#define OHCI_CTL_CLE (1 << 4)
+#define OHCI_CTL_BLE (1 << 5)
+#define OHCI_CTL_HCFS ((1 << 6) | (1 << 7))
#define OHCI_USB_RESET 0x00
#define OHCI_USB_RESUME 0x40
#define OHCI_USB_OPERATIONAL 0x80
#define OHCI_USB_SUSPEND 0xc0
-#define OHCI_CTL_IR (1<<8)
-#define OHCI_CTL_RWC (1<<9)
-#define OHCI_CTL_RWE (1<<10)
-
-#define OHCI_STATUS_HCR (1<<0)
-#define OHCI_STATUS_CLF (1<<1)
-#define OHCI_STATUS_BLF (1<<2)
-#define OHCI_STATUS_OCR (1<<3)
-#define OHCI_STATUS_SOC ((1<<6)|(1<<7))
-
-#define OHCI_INTR_SO (1U<<0) /* Scheduling overrun */
-#define OHCI_INTR_WD (1U<<1) /* HcDoneHead writeback */
-#define OHCI_INTR_SF (1U<<2) /* Start of frame */
-#define OHCI_INTR_RD (1U<<3) /* Resume detect */
-#define OHCI_INTR_UE (1U<<4) /* Unrecoverable error */
-#define OHCI_INTR_FNO (1U<<5) /* Frame number overflow */
-#define OHCI_INTR_RHSC (1U<<6) /* Root hub status change */
-#define OHCI_INTR_OC (1U<<30) /* Ownership change */
-#define OHCI_INTR_MIE (1U<<31) /* Master Interrupt Enable */
+#define OHCI_CTL_IR (1 << 8)
+#define OHCI_CTL_RWC (1 << 9)
+#define OHCI_CTL_RWE (1 << 10)
+
+#define OHCI_STATUS_HCR (1 << 0)
+#define OHCI_STATUS_CLF (1 << 1)
+#define OHCI_STATUS_BLF (1 << 2)
+#define OHCI_STATUS_OCR (1 << 3)
+#define OHCI_STATUS_SOC ((1 << 6) | (1 << 7))
+
+#define OHCI_INTR_SO (1U << 0) /* Scheduling overrun */
+#define OHCI_INTR_WD (1U << 1) /* HcDoneHead writeback */
+#define OHCI_INTR_SF (1U << 2) /* Start of frame */
+#define OHCI_INTR_RD (1U << 3) /* Resume detect */
+#define OHCI_INTR_UE (1U << 4) /* Unrecoverable error */
+#define OHCI_INTR_FNO (1U << 5) /* Frame number overflow */
+#define OHCI_INTR_RHSC (1U << 6) /* Root hub status change */
+#define OHCI_INTR_OC (1U << 30) /* Ownership change */
+#define OHCI_INTR_MIE (1U << 31) /* Master Interrupt Enable */
#define OHCI_HCCA_SIZE 0x100
#define OHCI_HCCA_MASK 0xffffff00
@@ -181,40 +181,40 @@ struct ohci_iso_td {
#define OHCI_FMI_FSMPS 0xffff0000
#define OHCI_FMI_FIT 0x80000000
-#define OHCI_FR_RT (1U<<31)
+#define OHCI_FR_RT (1U << 31)
#define OHCI_LS_THRESH 0x628
#define OHCI_RHA_RW_MASK 0x00000000 /* Mask of supported features. */
-#define OHCI_RHA_PSM (1<<8)
-#define OHCI_RHA_NPS (1<<9)
-#define OHCI_RHA_DT (1<<10)
-#define OHCI_RHA_OCPM (1<<11)
-#define OHCI_RHA_NOCP (1<<12)
+#define OHCI_RHA_PSM (1 << 8)
+#define OHCI_RHA_NPS (1 << 9)
+#define OHCI_RHA_DT (1 << 10)
+#define OHCI_RHA_OCPM (1 << 11)
+#define OHCI_RHA_NOCP (1 << 12)
#define OHCI_RHA_POTPGT_MASK 0xff000000
-#define OHCI_RHS_LPS (1U<<0)
-#define OHCI_RHS_OCI (1U<<1)
-#define OHCI_RHS_DRWE (1U<<15)
-#define OHCI_RHS_LPSC (1U<<16)
-#define OHCI_RHS_OCIC (1U<<17)
-#define OHCI_RHS_CRWE (1U<<31)
-
-#define OHCI_PORT_CCS (1<<0)
-#define OHCI_PORT_PES (1<<1)
-#define OHCI_PORT_PSS (1<<2)
-#define OHCI_PORT_POCI (1<<3)
-#define OHCI_PORT_PRS (1<<4)
-#define OHCI_PORT_PPS (1<<8)
-#define OHCI_PORT_LSDA (1<<9)
-#define OHCI_PORT_CSC (1<<16)
-#define OHCI_PORT_PESC (1<<17)
-#define OHCI_PORT_PSSC (1<<18)
-#define OHCI_PORT_OCIC (1<<19)
-#define OHCI_PORT_PRSC (1<<20)
-#define OHCI_PORT_WTC (OHCI_PORT_CSC|OHCI_PORT_PESC|OHCI_PORT_PSSC \
- |OHCI_PORT_OCIC|OHCI_PORT_PRSC)
-
+#define OHCI_RHS_LPS (1U << 0)
+#define OHCI_RHS_OCI (1U << 1)
+#define OHCI_RHS_DRWE (1U << 15)
+#define OHCI_RHS_LPSC (1U << 16)
+#define OHCI_RHS_OCIC (1U << 17)
+#define OHCI_RHS_CRWE (1U << 31)
+
+#define OHCI_PORT_CCS (1 << 0)
+#define OHCI_PORT_PES (1 << 1)
+#define OHCI_PORT_PSS (1 << 2)
+#define OHCI_PORT_POCI (1 << 3)
+#define OHCI_PORT_PRS (1 << 4)
+#define OHCI_PORT_PPS (1 << 8)
+#define OHCI_PORT_LSDA (1 << 9)
+#define OHCI_PORT_CSC (1 << 16)
+#define OHCI_PORT_PESC (1 << 17)
+#define OHCI_PORT_PSSC (1 << 18)
+#define OHCI_PORT_OCIC (1 << 19)
+#define OHCI_PORT_PRSC (1 << 20)
+#define OHCI_PORT_WTC (OHCI_PORT_CSC | OHCI_PORT_PESC | \
+ OHCI_PORT_PSSC | OHCI_PORT_OCIC | \
+ OHCI_PORT_PRSC)
#define OHCI_TD_DIR_SETUP 0x0
#define OHCI_TD_DIR_OUT 0x1
#define OHCI_TD_DIR_IN 0x2
@@ -235,6 +235,24 @@ struct ohci_iso_td {
#define OHCI_HRESET_FSBIR (1 << 0)
+static const char *ohci_reg_names[] = {
+ "HcRevision", "HcControl", "HcCommandStatus", "HcInterruptStatus",
+ "HcInterruptEnable", "HcInterruptDisable", "HcHCCA", "HcPeriodCurrentED",
+ "HcControlHeadED", "HcControlCurrentED", "HcBulkHeadED", "HcBulkCurrentED",
+ "HcDoneHead", "HcFmInterval", "HcFmRemaining", "HcFmNumber",
+ "HcPeriodicStart", "HcLSThreshold", "HcRhDescriptorA", "HcRhDescriptorB",
+ "HcRhStatus"
+};
+
+static const char *ohci_reg_name(hwaddr addr)
+{
+ if (addr >> 2 < ARRAY_SIZE(ohci_reg_names)) {
+ return ohci_reg_names[addr >> 2];
+ } else {
+ return "<unknown>";
+ }
+}
+
static void ohci_die(OHCIState *ohci)
{
ohci->ohci_die(ohci);
@@ -335,8 +353,8 @@ static void ohci_soft_reset(OHCIState *ohci)
ohci->per_cur = 0;
ohci->done = 0;
ohci->done_count = 7;
-
- /* FSMPS is marked TBD in OCHI 1.0, what gives ffs?
+ /*
+ * FSMPS is marked TBD in OHCI 1.0, what gives ffs?
* I took the value linux sets ...
*/
ohci->fsmps = 0x2778;
@@ -460,10 +478,10 @@ static inline int ohci_read_hcca(OHCIState *ohci,
static inline int ohci_put_ed(OHCIState *ohci,
dma_addr_t addr, struct ohci_ed *ed)
{
- /* ed->tail is under control of the HCD.
+ /*
+ * ed->tail is under control of the HCD.
* Since just ed->head is changed by HC, just write back this
*/
-
return put_dwords(ohci, addr + ED_WBACK_OFFSET,
(uint32_t *)((char *)ed + ED_WBACK_OFFSET),
ED_WBACK_SIZE >> 2);
@@ -499,9 +517,9 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td,
ptr = td->cbp;
n = 0x1000 - (ptr & 0xfff);
- if (n > len)
+ if (n > len) {
n = len;
-
+ }
if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf,
n, dir, MEMTXATTRS_UNSPECIFIED)) {
return -1;
@@ -527,9 +545,9 @@ static int ohci_copy_iso_td(OHCIState *ohci,
ptr = start_addr;
n = 0x1000 - (ptr & 0xfff);
- if (n > len)
+ if (n > len) {
n = len;
-
+ }
if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf,
n, dir, MEMTXATTRS_UNSPECIFIED)) {
return -1;
@@ -584,7 +602,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed)
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
frame_count = OHCI_BM(iso_td.flags, TD_FC);
- relative_frame_number = USUB(ohci->frame_number, starting_frame);
+ relative_frame_number = USUB(ohci->frame_number, starting_frame);
trace_usb_ohci_iso_td_head(
ed->head & OHCI_DPTR_MASK, ed->tail & OHCI_DPTR_MASK,
@@ -601,8 +619,10 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed)
trace_usb_ohci_iso_td_relative_frame_number_neg(relative_frame_number);
return 1;
} else if (relative_frame_number > frame_count) {
- /* ISO TD expired - retire the TD to the Done Queue and continue with
- the next ISO TD of the same ED */
+ /*
+ * ISO TD expired - retire the TD to the Done Queue and continue with
+ * the next ISO TD of the same ED
+ */
trace_usb_ohci_iso_td_relative_frame_number_big(relative_frame_number,
frame_count);
if (OHCI_CC_DATAOVERRUN == OHCI_BM(iso_td.flags, TD_CC)) {
@@ -615,8 +635,9 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed)
iso_td.next = ohci->done;
ohci->done = addr;
i = OHCI_BM(iso_td.flags, TD_DI);
- if (i < ohci->done_count)
+ if (i < ohci->done_count) {
ohci->done_count = i;
+ }
if (ohci_put_iso_td(ohci, addr, &iso_td)) {
ohci_die(ohci);
return 1;
@@ -655,8 +676,8 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed)
next_offset = iso_td.be;
}
- if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) ||
- ((relative_frame_number < frame_count) &&
+ if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) ||
+ ((relative_frame_number < frame_count) &&
!(OHCI_BM(next_offset, TD_PSW_CC) & 0xe))) {
trace_usb_ohci_iso_td_bad_cc_not_accessed(start_offset, next_offset);
return 1;
@@ -801,8 +822,9 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed)
iso_td.next = ohci->done;
ohci->done = addr;
i = OHCI_BM(iso_td.flags, TD_DI);
- if (i < ohci->done_count)
+ if (i < ohci->done_count) {
ohci->done_count = i;
+ }
}
if (ohci_put_iso_td(ohci, addr, &iso_td)) {
ohci_die(ohci);
@@ -845,9 +867,10 @@ static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len)
}
}
-/* Service a transport descriptor.
- Returns nonzero to terminate processing of this endpoint. */
-
+/*
+ * Service a transport descriptor.
+ * Returns nonzero to terminate processing of this endpoint.
+ */
static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
{
int dir;
@@ -869,7 +892,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
return 1;
}
- /* See if this TD has already been submitted to the device. */
+ /* See if this TD has already been submitted to the device. */
completion = (addr == ohci->async_td);
if (completion && !ohci->async_complete) {
trace_usb_ohci_td_skip_async();
@@ -885,7 +908,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
switch (dir) {
case OHCI_TD_DIR_OUT:
case OHCI_TD_DIR_IN:
- /* Same value. */
+ /* Same value. */
break;
default:
dir = OHCI_BM(td.flags, TD_DP);
@@ -956,11 +979,12 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
}
ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN));
if (ohci->async_td) {
- /* ??? The hardware should allow one active packet per
- endpoint. We only allow one active packet per controller.
- This should be sufficient as long as devices respond in a
- timely manner.
- */
+ /*
+ * ??? The hardware should allow one active packet per
+ * endpoint. We only allow one active packet per controller.
+ * This should be sufficient as long as devices respond in a
+ * timely manner.
+ */
trace_usb_ohci_td_too_many_pending(ep->nr);
return 1;
}
@@ -996,7 +1020,7 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
/* Writeback */
if (ret == pktlen || (dir == OHCI_TD_DIR_IN && ret >= 0 && flag_r)) {
- /* Transmission succeeded. */
+ /* Transmission succeeded. */
if (ret == len) {
td.cbp = 0;
} else {
@@ -1018,8 +1042,9 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
/* Setting ED_C is part of the TD retirement process */
ed->head &= ~OHCI_ED_C;
- if (td.flags & OHCI_TD_T0)
+ if (td.flags & OHCI_TD_T0) {
ed->head |= OHCI_ED_C;
+ }
} else {
if (ret >= 0) {
trace_usb_ohci_td_underrun();
@@ -1048,8 +1073,10 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
OHCI_SET_BM(td.flags, TD_EC, 3);
break;
}
- /* An error occurred so we have to clear the interrupt counter. See
- * spec at 6.4.4 on page 104 */
+ /*
+ * An error occurred so we have to clear the interrupt counter.
+ * See spec at 6.4.4 on page 104
+ */
ohci->done_count = 0;
}
ed->head |= OHCI_ED_H;
@@ -1061,8 +1088,9 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
td.next = ohci->done;
ohci->done = addr;
i = OHCI_BM(td.flags, TD_DI);
- if (i < ohci->done_count)
+ if (i < ohci->done_count) {
ohci->done_count = i;
+ }
exit_no_retire:
if (ohci_put_td(ohci, addr, &td)) {
ohci_die(ohci);
@@ -1071,7 +1099,7 @@ exit_no_retire:
return OHCI_BM(td.flags, TD_CC) != OHCI_CC_NOERROR;
}
-/* Service an endpoint list. Returns nonzero if active TD were found. */
+/* Service an endpoint list. Returns nonzero if active TD were found. */
static int ohci_service_ed_list(OHCIState *ohci, uint32_t head)
{
struct ohci_ed ed;
@@ -1081,9 +1109,9 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head)
uint32_t link_cnt = 0;
active = 0;
- if (head == 0)
+ if (head == 0) {
return 0;
-
+ }
for (cur = head; cur && link_cnt++ < ED_LINK_LIMIT; cur = next_ed) {
if (ohci_read_ed(ohci, cur, &ed)) {
trace_usb_ohci_ed_read_error(cur);
@@ -1095,7 +1123,7 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head)
if ((ed.head & OHCI_ED_H) || (ed.flags & OHCI_ED_K)) {
uint32_t addr;
- /* Cancel pending packets for ED that have been paused. */
+ /* Cancel pending packets for ED that have been paused. */
addr = ed.head & OHCI_DPTR_MASK;
if (ohci->async_td && addr == ohci->async_td) {
usb_cancel_packet(&ohci->usb_packet);
@@ -1112,15 +1140,16 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head)
ed.tail & OHCI_DPTR_MASK, ed.next & OHCI_DPTR_MASK);
trace_usb_ohci_ed_pkt_flags(
OHCI_BM(ed.flags, ED_FA), OHCI_BM(ed.flags, ED_EN),
- OHCI_BM(ed.flags, ED_D), (ed.flags & OHCI_ED_S)!= 0,
+ OHCI_BM(ed.flags, ED_D), (ed.flags & OHCI_ED_S) != 0,
(ed.flags & OHCI_ED_K) != 0, (ed.flags & OHCI_ED_F) != 0,
OHCI_BM(ed.flags, ED_MPS));
active = 1;
if ((ed.flags & OHCI_ED_F) == 0) {
- if (ohci_service_td(ohci, &ed))
+ if (ohci_service_td(ohci, &ed)) {
break;
+ }
} else {
/* Handle isochronous endpoints */
if (ohci_service_iso_td(ohci, &ed)) {
@@ -1151,7 +1180,7 @@ static void ohci_sof(OHCIState *ohci)
ohci_set_interrupt(ohci, OHCI_INTR_SF);
}
-/* Process Control and Bulk lists. */
+/* Process Control and Bulk lists. */
static void ohci_process_lists(OHCIState *ohci)
{
if ((ohci->ctl & OHCI_CTL_CLE) && (ohci->status & OHCI_STATUS_CLF)) {
@@ -1192,7 +1221,7 @@ static void ohci_frame_boundary(void *opaque)
ohci_service_ed_list(ohci, le32_to_cpu(hcca.intr[n]));
}
- /* Cancel all pending packets if either of the lists has been disabled. */
+ /* Cancel all pending packets if either of the lists has been disabled. */
if (ohci->old_ctl & (~ohci->ctl) & (OHCI_CTL_BLE | OHCI_CTL_CLE)) {
ohci_stop_endpoints(ohci);
}
@@ -1212,19 +1241,21 @@ static void ohci_frame_boundary(void *opaque)
hcca.frame = cpu_to_le16(ohci->frame_number);
if (ohci->done_count == 0 && !(ohci->intr_status & OHCI_INTR_WD)) {
- if (!ohci->done)
+ if (!ohci->done) {
abort();
- if (ohci->intr & ohci->intr_status)
+ }
+ if (ohci->intr & ohci->intr_status) {
ohci->done |= 1;
+ }
hcca.done = cpu_to_le32(ohci->done);
ohci->done = 0;
ohci->done_count = 7;
ohci_set_interrupt(ohci, OHCI_INTR_WD);
}
- if (ohci->done_count != 7 && ohci->done_count != 0)
+ if (ohci->done_count != 7 && ohci->done_count != 0) {
ohci->done_count--;
-
+ }
/* Do SOF stuff here */
ohci_sof(ohci);
@@ -1234,18 +1265,17 @@ static void ohci_frame_boundary(void *opaque)
}
}
-/* Start sending SOF tokens across the USB bus, lists are processed in
+/*
+ * Start sending SOF tokens across the USB bus, lists are processed in
* next frame
*/
static int ohci_bus_start(OHCIState *ohci)
{
trace_usb_ohci_start(ohci->name);
-
- /* Delay the first SOF event by one frame time as
- * linux driver is not ready to receive it and
- * can meet some race conditions
+ /*
+ * Delay the first SOF event by one frame time as linux driver is
+ * not ready to receive it and can meet some race conditions
*/
-
ohci->sof_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ohci_eof_timer(ohci);
@@ -1259,39 +1289,7 @@ void ohci_bus_stop(OHCIState *ohci)
timer_del(ohci->eof_timer);
}
-/* Sets a flag in a port status register but only set it if the port is
- * connected, if not set ConnectStatusChange flag. If flag is enabled
- * return 1.
- */
-static int ohci_port_set_if_connected(OHCIState *ohci, int i, uint32_t val)
-{
- int ret = 1;
-
- /* writing a 0 has no effect */
- if (val == 0)
- return 0;
-
- /* If CurrentConnectStatus is cleared we set
- * ConnectStatusChange
- */
- if (!(ohci->rhport[i].ctrl & OHCI_PORT_CCS)) {
- ohci->rhport[i].ctrl |= OHCI_PORT_CSC;
- if (ohci->rhstatus & OHCI_RHS_DRWE) {
- /* TODO: CSC is a wakeup event */
- }
- return 0;
- }
-
- if (ohci->rhport[i].ctrl & val)
- ret = 0;
-
- /* set the bit */
- ohci->rhport[i].ctrl |= val;
-
- return ret;
-}
-
-/* Set the frame interval - frame interval toggle is manipulated by the hcd only */
+/* Frame interval toggle is manipulated by the hcd only */
static void ohci_set_frame_interval(OHCIState *ohci, uint16_t val)
{
val &= OHCI_FMI_FI;
@@ -1308,10 +1306,8 @@ static void ohci_port_power(OHCIState *ohci, int i, int p)
if (p) {
ohci->rhport[i].ctrl |= OHCI_PORT_PPS;
} else {
- ohci->rhport[i].ctrl &= ~(OHCI_PORT_PPS|
- OHCI_PORT_CCS|
- OHCI_PORT_PSS|
- OHCI_PORT_PRS);
+ ohci->rhport[i].ctrl &= ~(OHCI_PORT_PPS | OHCI_PORT_CCS |
+ OHCI_PORT_PSS | OHCI_PORT_PRS);
}
}
@@ -1326,9 +1322,9 @@ static void ohci_set_ctl(OHCIState *ohci, uint32_t val)
new_state = ohci->ctl & OHCI_CTL_HCFS;
/* no state change */
- if (old_state == new_state)
+ if (old_state == new_state) {
return;
-
+ }
trace_usb_ohci_set_ctl(ohci->name, new_state);
switch (new_state) {
case OHCI_USB_OPERATIONAL:
@@ -1354,21 +1350,19 @@ static uint32_t ohci_get_frame_remaining(OHCIState *ohci)
uint16_t fr;
int64_t tks;
- if ((ohci->ctl & OHCI_CTL_HCFS) != OHCI_USB_OPERATIONAL)
- return (ohci->frt << 31);
-
- /* Being in USB operational state guarnatees sof_time was
- * set already.
- */
+ if ((ohci->ctl & OHCI_CTL_HCFS) != OHCI_USB_OPERATIONAL) {
+ return ohci->frt << 31;
+ }
+ /* Being in USB operational state guarantees sof_time was set already. */
tks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ohci->sof_time;
if (tks < 0) {
tks = 0;
}
/* avoid muldiv if possible */
- if (tks >= usb_frame_time)
- return (ohci->frt << 31);
-
+ if (tks >= usb_frame_time) {
+ return ohci->frt << 31;
+ }
tks = tks / usb_bit_time;
fr = (uint16_t)(ohci->fi - tks);
@@ -1384,33 +1378,66 @@ static void ohci_set_hub_status(OHCIState *ohci, uint32_t val)
old_state = ohci->rhstatus;
/* write 1 to clear OCIC */
- if (val & OHCI_RHS_OCIC)
+ if (val & OHCI_RHS_OCIC) {
ohci->rhstatus &= ~OHCI_RHS_OCIC;
-
+ }
if (val & OHCI_RHS_LPS) {
int i;
- for (i = 0; i < ohci->num_ports; i++)
+ for (i = 0; i < ohci->num_ports; i++) {
ohci_port_power(ohci, i, 0);
+ }
trace_usb_ohci_hub_power_down();
}
if (val & OHCI_RHS_LPSC) {
int i;
- for (i = 0; i < ohci->num_ports; i++)
+ for (i = 0; i < ohci->num_ports; i++) {
ohci_port_power(ohci, i, 1);
+ }
trace_usb_ohci_hub_power_up();
}
- if (val & OHCI_RHS_DRWE)
+ if (val & OHCI_RHS_DRWE) {
ohci->rhstatus |= OHCI_RHS_DRWE;
-
- if (val & OHCI_RHS_CRWE)
+ }
+ if (val & OHCI_RHS_CRWE) {
ohci->rhstatus &= ~OHCI_RHS_DRWE;
-
- if (old_state != ohci->rhstatus)
+ }
+ if (old_state != ohci->rhstatus) {
ohci_set_interrupt(ohci, OHCI_INTR_RHSC);
+ }
+}
+
+/*
+ * Set a flag in a port status register, but only if the port is connected;
+ * if it is not, set the ConnectStatusChange flag instead. Return 1 if the
+ * flag was newly enabled.
+ */
+static int ohci_port_set_if_connected(OHCIState *ohci, int i, uint32_t val)
+{
+ int ret = 1;
+
+ /* writing a 0 has no effect */
+ if (val == 0) {
+ return 0;
+ }
+ /* If CurrentConnectStatus is cleared we set ConnectStatusChange */
+ if (!(ohci->rhport[i].ctrl & OHCI_PORT_CCS)) {
+ ohci->rhport[i].ctrl |= OHCI_PORT_CSC;
+ if (ohci->rhstatus & OHCI_RHS_DRWE) {
+ /* TODO: CSC is a wakeup event */
+ }
+ return 0;
+ }
+
+ if (ohci->rhport[i].ctrl & val) {
+ ret = 0;
+ }
+ /* set the bit */
+ ohci->rhport[i].ctrl |= val;
+
+ return ret;
}
/* Set root hub port status */
@@ -1423,12 +1450,12 @@ static void ohci_port_set_status(OHCIState *ohci, int portnum, uint32_t val)
old_state = port->ctrl;
/* Write to clear CSC, PESC, PSSC, OCIC, PRSC */
- if (val & OHCI_PORT_WTC)
+ if (val & OHCI_PORT_WTC) {
port->ctrl &= ~(val & OHCI_PORT_WTC);
-
- if (val & OHCI_PORT_CCS)
+ }
+ if (val & OHCI_PORT_CCS) {
port->ctrl &= ~OHCI_PORT_PES;
-
+ }
ohci_port_set_if_connected(ohci, portnum, val & OHCI_PORT_PES);
if (ohci_port_set_if_connected(ohci, portnum, val & OHCI_PORT_PSS)) {
@@ -1439,20 +1466,20 @@ static void ohci_port_set_status(OHCIState *ohci, int portnum, uint32_t val)
trace_usb_ohci_port_reset(portnum);
usb_device_reset(port->port.dev);
port->ctrl &= ~OHCI_PORT_PRS;
- /* ??? Should this also set OHCI_PORT_PESC. */
+ /* ??? Should this also set OHCI_PORT_PESC. */
port->ctrl |= OHCI_PORT_PES | OHCI_PORT_PRSC;
}
- /* Invert order here to ensure in ambiguous case, device is
- * powered up...
- */
- if (val & OHCI_PORT_LSDA)
+ /* Invert order so that, in the ambiguous case, the device is powered up. */
+ if (val & OHCI_PORT_LSDA) {
ohci_port_power(ohci, portnum, 0);
- if (val & OHCI_PORT_PPS)
+ }
+ if (val & OHCI_PORT_PPS) {
ohci_port_power(ohci, portnum, 1);
-
- if (old_state != port->ctrl)
+ }
+ if (old_state != port->ctrl) {
ohci_set_interrupt(ohci, OHCI_INTR_RHSC);
+ }
}
static uint64_t ohci_mem_read(void *opaque,
@@ -1469,6 +1496,8 @@ static uint64_t ohci_mem_read(void *opaque,
} else if (addr >= 0x54 && addr < 0x54 + ohci->num_ports * 4) {
/* HcRhPortStatus */
retval = ohci->rhport[(addr - 0x54) >> 2].ctrl | OHCI_PORT_PPS;
+ trace_usb_ohci_mem_port_read(size, "HcRhPortStatus", (addr - 0x50) >> 2,
+ addr, addr >> 2, retval);
} else {
switch (addr >> 2) {
case 0: /* HcRevision */
@@ -1573,6 +1602,10 @@ static uint64_t ohci_mem_read(void *opaque,
trace_usb_ohci_mem_read_bad_offset(addr);
retval = 0xffffffff;
}
+ if (addr != 0xc || retval) {
+ trace_usb_ohci_mem_read(size, ohci_reg_name(addr), addr, addr >> 2,
+ retval);
+ }
}
return retval;
@@ -1593,10 +1626,13 @@ static void ohci_mem_write(void *opaque,
if (addr >= 0x54 && addr < 0x54 + ohci->num_ports * 4) {
/* HcRhPortStatus */
+ trace_usb_ohci_mem_port_write(size, "HcRhPortStatus",
+ (addr - 0x50) >> 2, addr, addr >> 2, val);
ohci_port_set_status(ohci, (addr - 0x54) >> 2, val);
return;
}
+ trace_usb_ohci_mem_write(size, ohci_reg_name(addr), addr, addr >> 2, val);
switch (addr >> 2) {
case 1: /* HcControl */
ohci_set_ctl(ohci, val);
@@ -1609,8 +1645,9 @@ static void ohci_mem_write(void *opaque,
/* Bits written as '0' remain unchanged in the register */
ohci->status |= val;
- if (ohci->status & OHCI_STATUS_HCR)
+ if (ohci->status & OHCI_STATUS_HCR) {
ohci_soft_reset(ohci);
+ }
break;
case 3: /* HcInterruptStatus */
@@ -1688,8 +1725,9 @@ static void ohci_mem_write(void *opaque,
case 25: /* HcHReset */
ohci->hreset = val & ~OHCI_HRESET_FSBIR;
- if (val & OHCI_HRESET_FSBIR)
+ if (val & OHCI_HRESET_FSBIR) {
ohci_hard_reset(ohci);
+ }
break;
case 26: /* HcHInterruptEnable */
@@ -1827,7 +1865,7 @@ static USBBusOps ohci_bus_ops = {
void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports,
dma_addr_t localmem_base, char *masterbus,
uint32_t firstport, AddressSpace *as,
- void (*ohci_die_fn)(struct OHCIState *), Error **errp)
+ void (*ohci_die_fn)(OHCIState *), Error **errp)
{
Error *err = NULL;
int i;
@@ -1859,7 +1897,7 @@ void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports,
ohci->num_ports = num_ports;
if (masterbus) {
USBPort *ports[OHCI_MAX_PORTS];
- for(i = 0; i < num_ports; i++) {
+ for (i = 0; i < num_ports; i++) {
ports[i] = &ohci->rhport[i].port;
}
usb_register_companion(masterbus, ports, num_ports,
@@ -1892,7 +1930,7 @@ void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports,
ohci_frame_boundary, ohci);
}
-/**
+/*
* A typical OHCI will stop operating and set itself into error state
* (which can be queried by MMIO) to signal that it got an error.
*/
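
The trace calls added above rely on ohci_reg_name(), whose definition is introduced in an earlier hunk of this patch that is not shown here. Assuming it is a plain offset-to-name lookup (an assumption, not the patch's exact code), a minimal self-contained sketch could look like this:

#include <stdint.h>

/* Illustrative only: names follow the OHCI register layout, one name per
 * 32-bit register starting at offset 0; the real table lives in hcd-ohci.c. */
static const char *const ohci_reg_names[] = {
    "HcRevision", "HcControl", "HcCommandStatus", "HcInterruptStatus",
    "HcInterruptEnable", "HcInterruptDisable", "HcHCCA", "HcPeriodCurrentED",
};

static const char *ohci_reg_name(uint32_t addr)
{
    uint32_t idx = addr >> 2;   /* registers are 32 bits wide */

    if (idx < sizeof(ohci_reg_names) / sizeof(ohci_reg_names[0])) {
        return ohci_reg_names[idx];
    }
    return "<unknown>";
}
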
diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h
index 11ac57058d..e1827227ac 100644
--- a/hw/usb/hcd-ohci.h
+++ b/hw/usb/hcd-ohci.h
@@ -21,6 +21,7 @@
#ifndef HCD_OHCI_H
#define HCD_OHCI_H
+#include "hw/sysbus.h"
#include "sysemu/dma.h"
#include "hw/usb.h"
#include "qom/object.h"
@@ -33,7 +34,9 @@ typedef struct OHCIPort {
uint32_t ctrl;
} OHCIPort;
-typedef struct OHCIState {
+typedef struct OHCIState OHCIState;
+
+struct OHCIState {
USBBus bus;
qemu_irq irq;
MemoryRegion mem;
@@ -89,8 +92,8 @@ typedef struct OHCIState {
uint32_t async_td;
bool async_complete;
- void (*ohci_die)(struct OHCIState *ohci);
-} OHCIState;
+ void (*ohci_die)(OHCIState *ohci);
+};
#define TYPE_SYSBUS_OHCI "sysbus-ohci"
OBJECT_DECLARE_SIMPLE_TYPE(OHCISysBusState, SYSBUS_OHCI)
@@ -112,7 +115,7 @@ extern const VMStateDescription vmstate_ohci_state;
void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports,
dma_addr_t localmem_base, char *masterbus,
uint32_t firstport, AddressSpace *as,
- void (*ohci_die_fn)(struct OHCIState *), Error **errp);
+ void (*ohci_die_fn)(OHCIState *), Error **errp);
void ohci_bus_stop(OHCIState *ohci);
void ohci_stop_endpoints(OHCIState *ohci);
void ohci_hard_reset(OHCIState *ohci);
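
The hcd-ohci.h hunk above uses the common C idiom of declaring the typedef before the struct body so that members and prototypes can use the bare OHCIState name, which is what lets the ohci_die callback drop the struct keyword. A generic, self-contained illustration of the pattern (names invented here, not taken from the patch):

#include <stdio.h>

typedef struct Controller Controller;   /* forward typedef before the body */

struct Controller {
    int state;
    void (*die)(Controller *c);         /* no 'struct' keyword needed */
};

static void die_cb(Controller *c)
{
    c->state = -1;
    printf("controller stopped, state=%d\n", c->state);
}

int main(void)
{
    Controller c = { .state = 0, .die = die_cb };
    c.die(&c);
    return 0;
}
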
diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c
index 30ae0104bb..8ac1175ad2 100644
--- a/hw/usb/hcd-uhci.c
+++ b/hw/usb/hcd-uhci.c
@@ -60,9 +60,7 @@ enum {
TD_RESULT_ASYNC_CONT,
};
-typedef struct UHCIState UHCIState;
typedef struct UHCIAsync UHCIAsync;
-typedef struct UHCIPCIDeviceClass UHCIPCIDeviceClass;
struct UHCIPCIDeviceClass {
PCIDeviceClass parent_class;
@@ -1161,8 +1159,7 @@ static USBBusOps uhci_bus_ops = {
void usb_uhci_common_realize(PCIDevice *dev, Error **errp)
{
Error *err = NULL;
- PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
- UHCIPCIDeviceClass *u = container_of(pc, UHCIPCIDeviceClass, parent_class);
+ UHCIPCIDeviceClass *u = UHCI_GET_CLASS(dev);
UHCIState *s = UHCI(dev);
uint8_t *pci_conf = s->dev.config;
int i;
@@ -1269,7 +1266,7 @@ void uhci_data_class_init(ObjectClass *klass, void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- UHCIPCIDeviceClass *u = container_of(k, UHCIPCIDeviceClass, parent_class);
+ UHCIPCIDeviceClass *u = UHCI_CLASS(klass);
UHCIInfo *info = data;
k->realize = info->realize ? info->realize : usb_uhci_common_realize;
diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h
index e0fdb98ef1..69f8b40c49 100644
--- a/hw/usb/hcd-uhci.h
+++ b/hw/usb/hcd-uhci.h
@@ -75,7 +75,7 @@ typedef struct UHCIState {
} UHCIState;
#define TYPE_UHCI "pci-uhci-usb"
-DECLARE_INSTANCE_CHECKER(UHCIState, UHCI, TYPE_UHCI)
+OBJECT_DECLARE_TYPE(UHCIState, UHCIPCIDeviceClass, UHCI)
typedef struct UHCIInfo {
const char *name;
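
The hcd-uhci.c hunks above replace a container_of() on the embedded parent class with the UHCI_CLASS()/UHCI_GET_CLASS() checkers that OBJECT_DECLARE_TYPE generates. The layout fact that makes both forms equivalent is ordinary C: with the parent embedded as the first member, the derived pointer and the parent pointer share an address (QOM's macros additionally perform a runtime type check). A generic sketch under that assumption, with invented names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct BaseClass { const char *name; } BaseClass;

typedef struct DerivedClass {
    BaseClass parent_class;   /* parent embedded as the first member */
    int extra;
} DerivedClass;

int main(void)
{
    DerivedClass dc = { .parent_class = { "demo" }, .extra = 42 };
    BaseClass *base = &dc.parent_class;

    /* Both recover the same pointer; a checked cast would also verify type. */
    DerivedClass *via_container = container_of(base, DerivedClass, parent_class);
    DerivedClass *via_cast = (DerivedClass *)base;

    printf("%d %d\n", via_container->extra, via_cast->extra);
    return 0;
}
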
diff --git a/hw/usb/hcd-xhci-nec.c b/hw/usb/hcd-xhci-nec.c
index 13c9ac5dbd..328e5bfe7c 100644
--- a/hw/usb/hcd-xhci-nec.c
+++ b/hw/usb/hcd-xhci-nec.c
@@ -27,14 +27,16 @@
#include "hcd-xhci-pci.h"
-typedef struct XHCINecState {
+OBJECT_DECLARE_SIMPLE_TYPE(XHCINecState, NEC_XHCI)
+
+struct XHCINecState {
/*< private >*/
XHCIPciState parent_obj;
/*< public >*/
uint32_t flags;
uint32_t intrs;
uint32_t slots;
-} XHCINecState;
+};
static Property nec_xhci_properties[] = {
DEFINE_PROP_ON_OFF_AUTO("msi", XHCIPciState, msi, ON_OFF_AUTO_AUTO),
@@ -51,7 +53,7 @@ static Property nec_xhci_properties[] = {
static void nec_xhci_instance_init(Object *obj)
{
XHCIPciState *pci = XHCI_PCI(obj);
- XHCINecState *nec = container_of(pci, XHCINecState, parent_obj);
+ XHCINecState *nec = NEC_XHCI(obj);
pci->xhci.flags = nec->flags;
pci->xhci.numintrs = nec->intrs;
diff --git a/hw/usb/trace-events b/hw/usb/trace-events
index b65269892c..6bb9655c8d 100644
--- a/hw/usb/trace-events
+++ b/hw/usb/trace-events
@@ -57,8 +57,12 @@ usb_ohci_ed_read_error(uint32_t addr) "ED read error at 0x%x"
usb_ohci_ed_pkt(uint32_t cur, int h, int c, uint32_t head, uint32_t tail, uint32_t next) "ED @ 0x%.8x h=%u c=%u\n head=0x%.8x tailp=0x%.8x next=0x%.8x"
usb_ohci_ed_pkt_flags(uint32_t fa, uint32_t en, uint32_t d, int s, int k, int f, uint32_t mps) "fa=%u en=%u d=%u s=%u k=%u f=%u mps=%u"
usb_ohci_hcca_read_error(uint32_t addr) "HCCA read error at 0x%x"
+usb_ohci_mem_read(uint32_t size, const char *name, uint32_t addr, uint32_t offs, uint32_t val) "%d %s 0x%x %d -> 0x%x"
+usb_ohci_mem_port_read(uint32_t size, const char *name, uint32_t port, uint32_t addr, uint32_t offs, uint32_t val) "%d %s[%d] 0x%x %d -> 0x%x"
usb_ohci_mem_read_unaligned(uint32_t addr) "at 0x%x"
usb_ohci_mem_read_bad_offset(uint32_t addr) "0x%x"
+usb_ohci_mem_write(uint32_t size, const char *name, uint32_t addr, uint32_t offs, uint32_t val) "%d %s 0x%x %d <- 0x%x"
+usb_ohci_mem_port_write(uint32_t size, const char *name, uint32_t port, uint32_t addr, uint32_t offs, uint32_t val) "%d %s[%d] 0x%x %d <- 0x%x"
usb_ohci_mem_write_unaligned(uint32_t addr) "at 0x%x"
usb_ohci_mem_write_bad_offset(uint32_t addr) "0x%x"
usb_ohci_process_lists(uint32_t head, uint32_t cur) "head 0x%x, cur 0x%x"
diff --git a/hw/usb/u2f.h b/hw/usb/u2f.h
index a408a82927..8bff13141a 100644
--- a/hw/usb/u2f.h
+++ b/hw/usb/u2f.h
@@ -31,22 +31,16 @@
#define U2FHID_PACKET_SIZE 64
#define U2FHID_PENDING_IN_NUM 32
-typedef struct U2FKeyState U2FKeyState;
typedef struct U2FKeyInfo U2FKeyInfo;
#define TYPE_U2F_KEY "u2f-key"
-#define U2F_KEY(obj) \
- OBJECT_CHECK(U2FKeyState, (obj), TYPE_U2F_KEY)
-#define U2F_KEY_CLASS(klass) \
- OBJECT_CLASS_CHECK(U2FKeyClass, (klass), TYPE_U2F_KEY)
-#define U2F_KEY_GET_CLASS(obj) \
- OBJECT_GET_CLASS(U2FKeyClass, (obj), TYPE_U2F_KEY)
+OBJECT_DECLARE_TYPE(U2FKeyState, U2FKeyClass, U2F_KEY)
/*
* Callbacks to be used by the U2F key base device (i.e. hw/u2f.c)
* to interact with its variants (i.e. hw/u2f-*.c)
*/
-typedef struct U2FKeyClass {
+struct U2FKeyClass {
/*< private >*/
USBDeviceClass parent_class;
@@ -55,12 +49,12 @@ typedef struct U2FKeyClass {
const uint8_t packet[U2FHID_PACKET_SIZE]);
void (*realize)(U2FKeyState *key, Error **errp);
void (*unrealize)(U2FKeyState *key);
-} U2FKeyClass;
+};
/*
* State of the U2F key base device (i.e. hw/u2f.c)
*/
-typedef struct U2FKeyState {
+struct U2FKeyState {
USBDevice dev;
USBEndpoint *ep;
uint8_t idle;
@@ -70,7 +64,7 @@ typedef struct U2FKeyState {
uint8_t pending_in_start;
uint8_t pending_in_end;
uint8_t pending_in_num;
-} U2FKeyState;
+};
/*
* API to be used by the U2F key device variants (i.e. hw/u2f-*.c)
diff --git a/hw/xen/Kconfig b/hw/xen/Kconfig
new file mode 100644
index 0000000000..3467efb986
--- /dev/null
+++ b/hw/xen/Kconfig
@@ -0,0 +1,3 @@
+config XEN_BUS
+ bool
+ default y if (XEN || XEN_EMU)
diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c
index 085fd31ef7..afba71f6eb 100644
--- a/hw/xen/xen-legacy-backend.c
+++ b/hw/xen/xen-legacy-backend.c
@@ -676,21 +676,30 @@ void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev)
}
/* -------------------------------------------------------------------- */
-int xen_be_init(void)
+static void xen_set_dynamic_sysbus(void)
+{
+ Object *machine = qdev_get_machine();
+ ObjectClass *oc = object_get_class(machine);
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_XENSYSDEV);
+}
+
+void xen_be_init(void)
{
xengnttab_handle *gnttabdev;
xenstore = xs_daemon_open();
if (!xenstore) {
xen_pv_printf(NULL, 0, "can't connect to xenstored\n");
- return -1;
+ exit(1);
}
qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL);
if (xen_xc == NULL || xen_fmem == NULL) {
- /* Check if xen_init() have been called */
- goto err;
+ xen_pv_printf(NULL, 0, "Xen operations not set up\n");
+ exit(1);
}
gnttabdev = xengnttab_open(NULL, 0);
@@ -706,23 +715,16 @@ int xen_be_init(void)
xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus");
qbus_set_bus_hotplug_handler(xen_sysbus);
- return 0;
-
-err:
- qemu_set_fd_handler(xs_fileno(xenstore), NULL, NULL, NULL);
- xs_daemon_close(xenstore);
- xenstore = NULL;
-
- return -1;
-}
-
-static void xen_set_dynamic_sysbus(void)
-{
- Object *machine = qdev_get_machine();
- ObjectClass *oc = object_get_class(machine);
- MachineClass *mc = MACHINE_CLASS(oc);
+ xen_set_dynamic_sysbus();
- machine_class_allow_dynamic_sysbus_dev(mc, TYPE_XENSYSDEV);
+ xen_be_register("console", &xen_console_ops);
+ xen_be_register("vkbd", &xen_kbdmouse_ops);
+#ifdef CONFIG_VIRTFS
+ xen_be_register("9pfs", &xen_9pfs_ops);
+#endif
+#ifdef CONFIG_USB_LIBUSB
+ xen_be_register("qusb", &xen_usb_ops);
+#endif
}
int xen_be_register(const char *type, struct XenDevOps *ops)
@@ -744,20 +746,6 @@ int xen_be_register(const char *type, struct XenDevOps *ops)
return xenstore_scan(type, xen_domid, ops);
}
-void xen_be_register_common(void)
-{
- xen_set_dynamic_sysbus();
-
- xen_be_register("console", &xen_console_ops);
- xen_be_register("vkbd", &xen_kbdmouse_ops);
-#ifdef CONFIG_VIRTFS
- xen_be_register("9pfs", &xen_9pfs_ops);
-#endif
-#ifdef CONFIG_USB_LIBUSB
- xen_be_register("qusb", &xen_usb_ops);
-#endif
-}
-
int xen_be_bind_evtchn(struct XenLegacyDevice *xendev)
{
if (xendev->local_port != -1) {
diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c
index 20c9611d71..2e759d0619 100644
--- a/hw/xenpv/xen_machine_pv.c
+++ b/hw/xenpv/xen_machine_pv.c
@@ -36,10 +36,7 @@ static void xen_init_pv(MachineState *machine)
int i;
/* Initialize backend core & drivers */
- if (xen_be_init() != 0) {
- error_report("%s: xen backend core setup failed", __func__);
- exit(1);
- }
+ xen_be_init();
switch (xen_mode) {
case XEN_ATTACH:
@@ -55,7 +52,6 @@ static void xen_init_pv(MachineState *machine)
break;
}
- xen_be_register_common();
xen_be_register("vfb", &xen_framebuffer_ops);
xen_be_register("qnic", &xen_netdev_ops);