295 files changed, 11030 insertions, 4000 deletions
diff --git a/Kconfig.host b/Kconfig.host index 2ee71578f3..f496475f8e 100644 --- a/Kconfig.host +++ b/Kconfig.host @@ -11,6 +11,9 @@ config OPENGL config X11 bool +config PIXMAN + bool + config SPICE bool diff --git a/MAINTAINERS b/MAINTAINERS index a74ad6545b..934ab90f9c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -490,7 +490,7 @@ S: Supported F: include/sysemu/kvm_xen.h F: target/i386/kvm/xen* F: hw/i386/kvm/xen* -F: tests/avocado/xen_guest.py +F: tests/avocado/kvm_xen_guest.py Guest CPU Cores (other accelerators) ------------------------------------ @@ -859,8 +859,10 @@ M: Hao Wu <wuhaotsh@google.com> L: qemu-arm@nongnu.org S: Supported F: hw/*/npcm* +F: hw/sensor/adm1266.c F: include/hw/*/npcm* F: tests/qtest/npcm* +F: tests/qtest/adm1266-test.c F: pc-bios/npcm7xx_bootrom.bin F: roms/vbootrom F: docs/system/arm/nuvoton.rst @@ -1616,6 +1618,7 @@ F: hw/intc/sh_intc.c F: hw/pci-host/sh_pci.c F: hw/timer/sh_timer.c F: include/hw/sh4/sh_intc.h +F: include/hw/timer/tmu012.h Shix R: Yoshinori Sato <ysato@users.sourceforge.jp> @@ -1773,7 +1776,7 @@ F: include/hw/southbridge/ich9.h F: include/hw/southbridge/piix.h F: hw/isa/apm.c F: include/hw/isa/apm.h -F: tests/unit/test-x86-cpuid.c +F: tests/unit/test-x86-topo.c F: tests/qtest/test-x86-cpuid-compat.c PC Chipset @@ -1859,6 +1862,7 @@ M: Max Filippov <jcmvbkbc@gmail.com> S: Maintained F: hw/xtensa/xtfpga.c F: hw/net/opencores_eth.c +F: include/hw/xtensa/mx_pic.h Devices ------- @@ -2311,6 +2315,15 @@ F: hw/virtio/virtio-mem-pci.h F: hw/virtio/virtio-mem-pci.c F: include/hw/virtio/virtio-mem.h +virtio-snd +M: Gerd Hoffmann <kraxel@redhat.com> +R: Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +S: Supported +F: hw/audio/virtio-snd.c +F: hw/audio/virtio-snd-pci.c +F: include/hw/audio/virtio-snd.h +F: docs/system/devices/virtio-snd.rst + nvme M: Keith Busch <kbusch@kernel.org> M: Klaus Jensen <its@irrelevant.dk> @@ -2587,6 +2600,7 @@ W: https://canbus.pages.fel.cvut.cz/ F: net/can/* F: hw/net/can/* F: include/net/can_*.h +F: docs/system/devices/can.rst OpenPIC interrupt controller M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> @@ -3142,10 +3156,11 @@ M: Michael Roth <michael.roth@amd.com> M: Konstantin Kostiuk <kkostiuk@redhat.com> S: Maintained F: qga/ +F: contrib/systemd/qemu-guest-agent.service F: docs/interop/qemu-ga.rst F: docs/interop/qemu-ga-ref.rst F: scripts/qemu-guest-agent/ -F: tests/unit/test-qga.c +F: tests/*/test-qga* T: git https://github.com/mdroth/qemu.git qga QEMU Guest Agent Win32 @@ -4055,7 +4070,7 @@ F: gitdm.config F: contrib/gitdm/* Incompatible changes -R: libvir-list@redhat.com +R: devel@lists.libvirt.org F: docs/about/deprecated.rst Build System diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c index a9e7a2d5b4..8a496a2a6f 100644 --- a/accel/stubs/tcg-stub.c +++ b/accel/stubs/tcg-stub.c @@ -22,10 +22,6 @@ void tlb_set_dirty(CPUState *cpu, vaddr vaddr) { } -void tcg_flush_jmp_cache(CPUState *cpu) -{ -} - int probe_access_flags(CPUArchState *env, vaddr addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t retaddr) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index f35c5f359b..765805e70b 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -24,6 +24,7 @@ #include "exec/memory.h" #include "exec/cpu_ldst.h" #include "exec/cputlb.h" +#include "exec/tb-flush.h" #include "exec/memory-internal.h" #include "exec/ram_addr.h" #include "tcg/tcg.h" diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index d885cc1d3c..1b57290682 100644 --- 
a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -34,6 +34,7 @@ #include "qemu/timer.h" #include "exec/exec-all.h" #include "exec/hwaddr.h" +#include "exec/tb-flush.h" #include "exec/gdbstub.h" #include "tcg-accel-ops.h" @@ -77,6 +78,13 @@ int tcg_cpus_exec(CPUState *cpu) return ret; } +static void tcg_cpu_reset_hold(CPUState *cpu) +{ + tcg_flush_jmp_cache(cpu); + + tlb_flush(cpu); +} + /* mask must never be zero, except for A20 change call */ void tcg_handle_interrupt(CPUState *cpu, int mask) { @@ -205,6 +213,7 @@ static void tcg_accel_ops_init(AccelOpsClass *ops) } } + ops->cpu_reset_hold = tcg_cpu_reset_hold; ops->supports_guest_debug = tcg_supports_guest_debug; ops->insert_breakpoint = tcg_insert_breakpoint; ops->remove_breakpoint = tcg_remove_breakpoint; diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index e579b0891d..b263857ecc 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -673,11 +673,3 @@ void tcg_flush_jmp_cache(CPUState *cpu) qatomic_set(&jc->array[i].tb, NULL); } } - -/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ -void tcg_flush_softmmu_tlb(CPUState *cs) -{ -#ifdef CONFIG_SOFTMMU - tlb_flush(cs); -#endif -} diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c index 2dc6fd9c4e..4fbe2dbdc8 100644 --- a/accel/tcg/user-exec-stub.c +++ b/accel/tcg/user-exec-stub.c @@ -14,6 +14,10 @@ void qemu_init_vcpu(CPUState *cpu) { } +void cpu_exec_reset_hold(CPUState *cpu) +{ +} + /* User mode emulation does not support record/replay yet. */ bool replay_exception(void) diff --git a/blockdev.c b/blockdev.c index 1517dc6210..e9b7e38dc4 100644 --- a/blockdev.c +++ b/blockdev.c @@ -255,13 +255,13 @@ void drive_check_orphaned(void) * Ignore default drives, because we create certain default * drives unconditionally, then leave them unclaimed. Not the * users fault. - * Ignore IF_VIRTIO, because it gets desugared into -device, - * so we can leave failing to -device. + * Ignore IF_VIRTIO or IF_XEN, because it gets desugared into + * -device, so we can leave failing to -device. * Ignore IF_NONE, because leaving unclaimed IF_NONE remains * available for device_add is a feature. */ if (dinfo->is_default || dinfo->type == IF_VIRTIO - || dinfo->type == IF_NONE) { + || dinfo->type == IF_XEN || dinfo->type == IF_NONE) { continue; } if (!blk_get_attached_dev(blk)) { @@ -977,6 +977,15 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type, qemu_opt_set(devopts, "driver", "virtio-blk", &error_abort); qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"), &error_abort); + } else if (type == IF_XEN) { + QemuOpts *devopts; + devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0, + &error_abort); + qemu_opt_set(devopts, "driver", + (media == MEDIA_CDROM) ? 
"xen-cdrom" : "xen-disk", + &error_abort); + qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"), + &error_abort); } filename = qemu_opt_get(legacy_opts, "file"); diff --git a/bsd-user/main.c b/bsd-user/main.c index c402fadf46..e6014f517e 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -118,7 +118,7 @@ void fork_end(int child) */ CPU_FOREACH_SAFE(cpu, next_cpu) { if (cpu != thread_cpu) { - QTAILQ_REMOVE_RCU(&cpus, cpu, node); + QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node); } } mmap_fork_end(child); diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak index 361ea39d71..8e0a80492f 100644 --- a/configs/targets/hppa-linux-user.mak +++ b/configs/targets/hppa-linux-user.mak @@ -1,4 +1,5 @@ TARGET_ARCH=hppa +TARGET_ABI32=y TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y diff --git a/cpu-common.c b/cpu-common.c index 45c745ecf6..c81fd72d16 100644 --- a/cpu-common.c +++ b/cpu-common.c @@ -73,7 +73,7 @@ static int cpu_get_free_index(void) return max_cpu_index; } -CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); +CPUTailQ cpus_queue = QTAILQ_HEAD_INITIALIZER(cpus_queue); static unsigned int cpu_list_generation_id; unsigned int cpu_list_generation_id_get(void) @@ -90,7 +90,7 @@ void cpu_list_add(CPUState *cpu) } else { assert(!cpu_index_auto_assigned); } - QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node); + QTAILQ_INSERT_TAIL_RCU(&cpus_queue, cpu, node); cpu_list_generation_id++; } @@ -102,7 +102,7 @@ void cpu_list_remove(CPUState *cpu) return; } - QTAILQ_REMOVE_RCU(&cpus, cpu, node); + QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node); cpu->cpu_index = UNASSIGNED_CPU_INDEX; cpu_list_generation_id++; } diff --git a/cpu-target.c b/cpu-target.c index 79363ae370..f3e1ad8bcd 100644 --- a/cpu-target.c +++ b/cpu-target.c @@ -131,13 +131,13 @@ const VMStateDescription vmstate_cpu_common = { }; #endif -void cpu_exec_realizefn(CPUState *cpu, Error **errp) +bool cpu_exec_realizefn(CPUState *cpu, Error **errp) { /* cache the cpu class for the hotpath */ cpu->cc = CPU_GET_CLASS(cpu); if (!accel_cpu_common_realize(cpu, errp)) { - return; + return false; } /* Wait until cpu initialization complete before exposing cpu. */ @@ -159,6 +159,8 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp) vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu); } #endif /* CONFIG_USER_ONLY */ + + return true; } void cpu_exec_unrealizefn(CPUState *cpu) diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst index 768fb5c28c..9f1103f85a 100644 --- a/docs/interop/vhost-user.rst +++ b/docs/interop/vhost-user.rst @@ -108,6 +108,43 @@ A vring state description :num: a 32-bit number +A vring descriptor index for split virtqueues +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ++-------------+---------------------+ +| vring index | index in avail ring | ++-------------+---------------------+ + +:vring index: 32-bit index of the respective virtqueue + +:index in avail ring: 32-bit value, of which currently only the lower 16 + bits are used: + + - Bits 0–15: Index of the next *Available Ring* descriptor that the + back-end will process. This is a free-running index that is not + wrapped by the ring size. 
+ - Bits 16–31: Reserved (set to zero) + +Vring descriptor indices for packed virtqueues +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ++-------------+--------------------+ +| vring index | descriptor indices | ++-------------+--------------------+ + +:vring index: 32-bit index of the respective virtqueue + +:descriptor indices: 32-bit value: + + - Bits 0–14: Index of the next *Available Ring* descriptor that the + back-end will process. This is a free-running index that is not + wrapped by the ring size. + - Bit 15: Driver (Available) Ring Wrap Counter + - Bits 16–30: Index of the entry in the *Used Ring* where the back-end + will place the next descriptor. This is a free-running index that + is not wrapped by the ring size. + - Bit 31: Device (Used) Ring Wrap Counter + A vring address description ^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -285,6 +322,32 @@ VhostUserShared :UUID: 16 bytes UUID, whose first three components (a 32-bit value, then two 16-bit values) are stored in big endian. +Device state transfer parameters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ++--------------------+-----------------+ +| transfer direction | migration phase | ++--------------------+-----------------+ + +:transfer direction: a 32-bit enum, describing the direction in which + the state is transferred: + + - 0: Save: Transfer the state from the back-end to the front-end, + which happens on the source side of migration + - 1: Load: Transfer the state from the front-end to the back-end, + which happens on the destination side of migration + +:migration phase: a 32-bit enum, describing the state in which the VM + guest and devices are: + + - 0: Stopped (in the period after the transfer of memory-mapped + regions before switch-over to the destination): The VM guest is + stopped, and the vhost-user device is suspended (see + :ref:`Suspended device state <suspended_device_state>`). + + In the future, additional phases might be added e.g. to allow + iterative migration while the device is running. + C structure ----------- @@ -344,6 +407,7 @@ in the ancillary data: * ``VHOST_USER_SET_VRING_ERR`` * ``VHOST_USER_SET_BACKEND_REQ_FD`` (previous name ``VHOST_USER_SET_SLAVE_REQ_FD``) * ``VHOST_USER_SET_INFLIGHT_FD`` (if ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD``) +* ``VHOST_USER_SET_DEVICE_STATE_FD`` If *front-end* is unable to send the full message or receives a wrong reply it will close the connection. An optional reconnection mechanism @@ -374,35 +438,50 @@ negotiation. Ring states ----------- -Rings can be in one of three states: +Rings have two independent states: started/stopped, and enabled/disabled. -* stopped: the back-end must not process the ring at all. +* While a ring is stopped, the back-end must not process the ring at + all, regardless of whether it is enabled or disabled. The + enabled/disabled state should still be tracked, though, so it can come + into effect once the ring is started. -* started but disabled: the back-end must process the ring without +* started and disabled: The back-end must process the ring without causing any side effects. For example, for a networking device, in the disabled state the back-end must not supply any new RX packets, but must process and discard any TX packets. -* started and enabled. +* started and enabled: The back-end must process the ring normally, i.e. + process all requests and execute them. -Each ring is initialized in a stopped state. 
The back-end must start -ring upon receiving a kick (that is, detecting that file descriptor is -readable) on the descriptor specified by ``VHOST_USER_SET_VRING_KICK`` -or receiving the in-band message ``VHOST_USER_VRING_KICK`` if negotiated, -and stop ring upon receiving ``VHOST_USER_GET_VRING_BASE``. +Each ring is initialized in a stopped and disabled state. The back-end +must start a ring upon receiving a kick (that is, detecting that file +descriptor is readable) on the descriptor specified by +``VHOST_USER_SET_VRING_KICK`` or receiving the in-band message +``VHOST_USER_VRING_KICK`` if negotiated, and stop a ring upon receiving +``VHOST_USER_GET_VRING_BASE``. Rings can be enabled or disabled by ``VHOST_USER_SET_VRING_ENABLE``. -If ``VHOST_USER_F_PROTOCOL_FEATURES`` has not been negotiated, the -ring starts directly in the enabled state. - -If ``VHOST_USER_F_PROTOCOL_FEATURES`` has been negotiated, the ring is -initialized in a disabled state and is enabled by -``VHOST_USER_SET_VRING_ENABLE`` with parameter 1. +In addition, upon receiving a ``VHOST_USER_SET_FEATURES`` message from +the front-end without ``VHOST_USER_F_PROTOCOL_FEATURES`` set, the +back-end must enable all rings immediately. While processing the rings (whether they are enabled or not), the back-end must support changing some configuration aspects on the fly. +.. _suspended_device_state: + +Suspended device state +^^^^^^^^^^^^^^^^^^^^^^ + +While all vrings are stopped, the device is *suspended*. In addition to +not processing any vring (because they are stopped), the device must: + +* not write to any guest memory regions, +* not send any notifications to the guest, +* not send any messages to the front-end, +* still process and reply to messages from the front-end. + Multiple queue support ---------------------- @@ -490,7 +569,8 @@ ancillary data, it may be used to inform the front-end that the log has been modified. Once the source has finished migration, rings will be stopped by the -source. No further update must be done before rings are restarted. +source (:ref:`Suspended device state <suspended_device_state>`). No +further update must be done before rings are restarted. In postcopy migration the back-end is started before all the memory has been received from the source host, and care must be taken to avoid @@ -502,6 +582,80 @@ it performs WAKE ioctl's on the userfaultfd to wake the stalled back-end. The front-end indicates support for this via the ``VHOST_USER_PROTOCOL_F_PAGEFAULT`` feature. +.. _migrating_backend_state: + +Migrating back-end state +^^^^^^^^^^^^^^^^^^^^^^^^ + +Migrating device state involves transferring the state from one +back-end, called the source, to another back-end, called the +destination. After migration, the destination transparently resumes +operation without requiring the driver to re-initialize the device at +the VIRTIO level. If the migration fails, then the source can +transparently resume operation until another migration attempt is made. + +Generally, the front-end is connected to a virtual machine guest (which +contains the driver), which has its own state to transfer between source +and destination, and therefore will have an implementation-specific +mechanism to do so. 
The ``VHOST_USER_PROTOCOL_F_DEVICE_STATE`` feature +provides functionality to have the front-end include the back-end's +state in this transfer operation so the back-end does not need to +implement its own mechanism, and so the virtual machine may have its +complete state, including vhost-user devices' states, contained within a +single stream of data. + +To do this, the back-end state is transferred from back-end to front-end +on the source side, and vice versa on the destination side. This +transfer happens over a channel that is negotiated using the +``VHOST_USER_SET_DEVICE_STATE_FD`` message. This message has two +parameters: + +* Direction of transfer: On the source, the data is saved, transferring + it from the back-end to the front-end. On the destination, the data + is loaded, transferring it from the front-end to the back-end. + +* Migration phase: Currently, the only supported phase is the period + after the transfer of memory-mapped regions before switch-over to the + destination, when both the source and destination devices are + suspended (:ref:`Suspended device state <suspended_device_state>`). + In the future, additional phases might be supported to allow iterative + migration while the device is running. + +The nature of the channel is implementation-defined, but it must +generally behave like a pipe: The writing end will write all the data it +has into it, signalling the end of data by closing its end. The reading +end must read all of this data (until encountering the end of file) and +process it. + +* When saving, the writing end is the source back-end, and the reading + end is the source front-end. After reading the state data from the + channel, the source front-end must transfer it to the destination + front-end through an implementation-defined mechanism. + +* When loading, the writing end is the destination front-end, and the + reading end is the destination back-end. After reading the state data + from the channel, the destination back-end must deserialize its + internal state from that data and set itself up to allow the driver to + seamlessly resume operation on the VIRTIO level. + +Seamlessly resuming operation means that the migration must be +transparent to the guest driver, which operates on the VIRTIO level. +This driver will not perform any re-initialization steps, but continue +to use the device as if no migration had occurred. The vhost-user +front-end, however, will re-initialize the vhost state on the +destination, following the usual protocol for establishing a connection +to a vhost-user back-end: This includes, for example, setting up memory +mappings and kick and call FDs as necessary, negotiating protocol +features, or setting the initial vring base indices (to the same value +as on the source side, so that operation can resume). + +Both on the source and on the destination side, after the respective +front-end has seen all data transferred (when the transfer FD has been +closed), it sends the ``VHOST_USER_CHECK_DEVICE_STATE`` message to +verify that data transfer was successful in the back-end, too. The +back-end responds once it knows whether the transfer and processing was +successful or not. 
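As an illustration of the save-side flow just described, here is a minimal, hypothetical C sketch of a front-end transferring back-end state over a pipe channel. The ``vhost_user_send_msg_with_fd()`` helper and its signature are assumptions made for this sketch (it is not a QEMU or libvhost-user API); the message IDs, the payload layout and the reply-bit semantics come from the protocol description above:

    /*
     * Hypothetical front-end "save" flow for VHOST_USER_PROTOCOL_F_DEVICE_STATE.
     * vhost_user_send_msg_with_fd() is an assumed helper that sends a vhost-user
     * request (optionally with an fd as ancillary data) and returns the u64
     * reply payload.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define VHOST_USER_SET_DEVICE_STATE_FD 42
    #define VHOST_USER_CHECK_DEVICE_STATE  43

    enum { TRANSFER_DIRECTION_SAVE = 0 };   /* back-end -> front-end */
    enum { MIGRATION_PHASE_STOPPED = 0 };   /* device is suspended */

    extern uint64_t vhost_user_send_msg_with_fd(int sock, uint32_t request,
                                                const void *payload, size_t len,
                                                int fd);

    static int save_backend_state(int backend_sock, FILE *migration_stream)
    {
        uint32_t params[2] = { TRANSFER_DIRECTION_SAVE, MIGRATION_PHASE_STOPPED };
        char buf[4096];
        uint64_t reply;
        ssize_t n;
        int pipefd[2];

        if (pipe(pipefd) < 0) {
            return -1;
        }

        /* Pass the writing end to the back-end; keep the reading end. */
        reply = vhost_user_send_msg_with_fd(backend_sock,
                                            VHOST_USER_SET_DEVICE_STATE_FD,
                                            params, sizeof(params), pipefd[1]);
        close(pipefd[1]);
        if (reply & 0xff) {                 /* bits 0-7 non-zero: error */
            close(pipefd[0]);
            return -1;
        }
        /*
         * If bit 8 (the invalid FD flag) were clear, the back-end would have
         * returned a replacement channel fd as ancillary data; this sketch
         * assumes it did not.
         */

        /* Read the state until EOF and forward it to the migration stream. */
        while ((n = read(pipefd[0], buf, sizeof(buf))) > 0) {
            fwrite(buf, 1, (size_t)n, migration_stream);
        }
        close(pipefd[0]);

        /* Ask the back-end whether the transfer succeeded on its side, too. */
        reply = vhost_user_send_msg_with_fd(backend_sock,
                                            VHOST_USER_CHECK_DEVICE_STATE,
                                            NULL, 0, -1);
        return reply == 0 ? 0 : -1;
    }

On the destination, the same channel is used in the opposite direction: the front-end keeps the writing end, and the back-end reads until EOF before deserializing its internal state.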
+ Memory access ------------- @@ -896,6 +1050,7 @@ Protocol features #define VHOST_USER_PROTOCOL_F_STATUS 16 #define VHOST_USER_PROTOCOL_F_XEN_MMAP 17 #define VHOST_USER_PROTOCOL_F_SHARED_OBJECT 18 + #define VHOST_USER_PROTOCOL_F_DEVICE_STATE 19 Front-end message types ----------------------- @@ -1042,18 +1197,54 @@ Front-end message types ``VHOST_USER_SET_VRING_BASE`` :id: 10 :equivalent ioctl: ``VHOST_SET_VRING_BASE`` - :request payload: vring state description + :request payload: vring descriptor index/indices :reply payload: N/A - Sets the base offset in the available vring. + Sets the next index to use for descriptors in this vring: + + * For a split virtqueue, sets only the next descriptor index to + process in the *Available Ring*. The device is supposed to read the + next index in the *Used Ring* from the respective vring structure in + guest memory. + + * For a packed virtqueue, both indices are supplied, as they are not + explicitly available in memory. + + Consequently, the payload type is specific to the type of virt queue + (*a vring descriptor index for split virtqueues* vs. *vring descriptor + indices for packed virtqueues*). ``VHOST_USER_GET_VRING_BASE`` :id: 11 :equivalent ioctl: ``VHOST_USER_GET_VRING_BASE`` :request payload: vring state description - :reply payload: vring state description + :reply payload: vring descriptor index/indices + + Stops the vring and returns the current descriptor index or indices: + + * For a split virtqueue, returns only the 16-bit next descriptor + index to process in the *Available Ring*. Note that this may + differ from the available ring index in the vring structure in + memory, which points to where the driver will put new available + descriptors. For the *Used Ring*, the device only needs the next + descriptor index at which to put new descriptors, which is the + value in the vring structure in memory, so this value is not + covered by this message. + + * For a packed virtqueue, neither index is explicitly available to + read from memory, so both indices (as maintained by the device) are + returned. + + Consequently, the payload type is specific to the type of virt queue + (*a vring descriptor index for split virtqueues* vs. *vring descriptor + indices for packed virtqueues*). - Get the available vring base offset. + When and as long as all of a device’s vrings are stopped, it is + *suspended*, see :ref:`Suspended device state + <suspended_device_state>`. + + The request payload’s *num* field is currently reserved and must be + set to 0. ``VHOST_USER_SET_VRING_KICK`` :id: 12 @@ -1464,6 +1655,76 @@ Front-end message types the requested UUID. Back-end will reply passing the fd when the operation is successful, or no fd otherwise. +``VHOST_USER_SET_DEVICE_STATE_FD`` + :id: 42 + :equivalent ioctl: N/A + :request payload: device state transfer parameters + :reply payload: ``u64`` + + Front-end and back-end negotiate a channel over which to transfer the + back-end’s internal state during migration. Either side (front-end or + back-end) may create the channel. The nature of this channel is not + restricted or defined in this document, but whichever side creates it + must create a file descriptor that is provided to the respectively + other side, allowing access to the channel. This FD must behave as + follows: + + * For the writing end, it must allow writing the whole back-end state + sequentially. Closing the file descriptor signals the end of + transfer. 
+ + * For the reading end, it must allow reading the whole back-end state + sequentially. The end of file signals the end of the transfer. + + For example, the channel may be a pipe, in which case the two ends of + the pipe fulfill these requirements respectively. + + Initially, the front-end creates a channel along with such an FD. It + passes the FD to the back-end as ancillary data of a + ``VHOST_USER_SET_DEVICE_STATE_FD`` message. The back-end may create a + different transfer channel, passing the respective FD back to the + front-end as ancillary data of the reply. If so, the front-end must + then discard its channel and use the one provided by the back-end. + + Whether the back-end should decide to use its own channel is decided + based on efficiency: If the channel is a pipe, both ends will most + likely need to copy data into and out of it. Any channel that allows + for more efficient processing on at least one end, e.g. through + zero-copy, is considered more efficient and thus preferred. If the + back-end can provide such a channel, it should decide to use it. + + The request payload contains parameters for the subsequent data + transfer, as described in the :ref:`Migrating back-end state + <migrating_backend_state>` section. + + The value returned is both an indication for success, and whether a + file descriptor for a back-end-provided channel is returned: Bits 0–7 + are 0 on success, and non-zero on error. Bit 8 is the invalid FD + flag; this flag is set when there is no file descriptor returned. + When this flag is not set, the front-end must use the returned file + descriptor as its end of the transfer channel. The back-end must not + both indicate an error and return a file descriptor. + + Using this function requires prior negotiation of the + ``VHOST_USER_PROTOCOL_F_DEVICE_STATE`` feature. + +``VHOST_USER_CHECK_DEVICE_STATE`` + :id: 43 + :equivalent ioctl: N/A + :request payload: N/A + :reply payload: ``u64`` + + After transferring the back-end’s internal state during migration (see + the :ref:`Migrating back-end state <migrating_backend_state>` + section), check whether the back-end was able to successfully fully + process the state. + + The value returned indicates success or error; 0 is success, any + non-zero value is an error. + + Using this function requires prior negotiation of the + ``VHOST_USER_PROTOCOL_F_DEVICE_STATE`` feature. + Back-end message types ---------------------- diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index 1167f3a9f2..d1f3277cb0 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -93,6 +93,7 @@ Emulated Devices devices/vhost-user.rst devices/virtio-gpu.rst devices/virtio-pmem.rst + devices/virtio-snd.rst devices/vhost-user-rng.rst devices/canokey.rst devices/usb-u2f.rst diff --git a/docs/system/devices/virtio-snd.rst b/docs/system/devices/virtio-snd.rst new file mode 100644 index 0000000000..2a9187fd70 --- /dev/null +++ b/docs/system/devices/virtio-snd.rst @@ -0,0 +1,49 @@ +virtio sound +============ + +This document explains the setup and usage of the Virtio sound device. +The Virtio sound device is a paravirtualized sound card device. + +Linux kernel support +-------------------- + +Virtio sound requires a guest Linux kernel built with the +``CONFIG_SND_VIRTIO`` option. + +Description +----------- + +Virtio sound implements capture and playback from inside a guest using the +configured audio backend of the host machine. 
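The device advertises its topology to the guest through a small configuration space: three little-endian 32-bit counters, as populated by ``virtio_snd_get_config()`` later in this diff. The struct below is an illustrative sketch (the field set matches the device's ``virtio_snd_config``; the struct name is made up), and the properties listed in the next section map directly onto these fields:

    /*
     * Illustrative layout of the virtio-snd configuration space (all fields
     * little-endian), mirroring what virtio_snd_get_config() copies out.
     * The struct name is hypothetical; QEMU uses the virtio_snd_config type
     * from the standard VIRTIO headers.
     */
    #include <stdint.h>

    struct virtio_snd_config_sketch {
        uint32_t jacks;   /* number of physical jacks (0: unimplemented) */
        uint32_t streams; /* number of PCM streams */
        uint32_t chmaps;  /* number of channel maps (0: unimplemented) */
    };

A guest driver reads these counters to know how many items it may query with requests such as ``VIRTIO_SND_R_PCM_INFO``.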
+ +Device properties +----------------- + +The Virtio sound device can be configured with the following properties: + + * ``jacks`` number of physical jacks (Unimplemented). + * ``streams`` number of PCM streams. At the moment, no stream configuration is supported: the first one will always be a playback stream, an optional second will always be a capture stream. Adding more will cycle stream directions from playback to capture. + * ``chmaps`` number of channel maps (Unimplemented). + +All streams are stereo and have the default channel positions ``Front left, right``. + +Examples +-------- + +Add an audio device and an audio backend at once with ``-audio`` and ``model=virtio``: + + * pulseaudio: ``-audio driver=pa,model=virtio`` + or ``-audio driver=pa,model=virtio,server=/run/user/1000/pulse/native`` + * sdl: ``-audio driver=sdl,model=virtio`` + * coreaudio: ``-audio driver=coreaudio,model=virtio`` + +etc. + +To specifically add virtualized sound devices, you have to specify a PCI device +and an audio backend listed with ``-audio driver=help`` that works on your host +machine, e.g.: + +:: + + -device virtio-sound-pci,audiodev=my_audiodev \ + -audiodev alsa,id=my_audiodev diff --git a/docs/system/i386/xen.rst b/docs/system/i386/xen.rst index f06765e88c..81898768ba 100644 --- a/docs/system/i386/xen.rst +++ b/docs/system/i386/xen.rst @@ -15,46 +15,24 @@ Setup ----- Xen mode is enabled by setting the ``xen-version`` property of the KVM -accelerator, for example for Xen 4.10: +accelerator, for example for Xen 4.17: .. parsed-literal:: - |qemu_system| --accel kvm,xen-version=0x4000a,kernel-irqchip=split + |qemu_system| --accel kvm,xen-version=0x40011,kernel-irqchip=split Additionally, virtual APIC support can be advertised to the guest through the ``xen-vapic`` CPU flag: .. parsed-literal:: - |qemu_system| --accel kvm,xen-version=0x4000a,kernel-irqchip=split --cpu host,+xen_vapic + |qemu_system| --accel kvm,xen-version=0x40011,kernel-irqchip=split --cpu host,+xen-vapic When Xen support is enabled, QEMU changes hypervisor identification (CPUID 0x40000000..0x4000000A) to Xen. The KVM identification and features are not advertised to a Xen guest. If Hyper-V is also enabled, the Xen identification moves to leaves 0x40000100..0x4000010A. -The Xen platform device is enabled automatically for a Xen guest. This allows -a guest to unplug all emulated devices, in order to use Xen PV block and network -drivers instead. Under Xen, the boot disk is typically available both via IDE -emulation, and as a PV block device. Guest bootloaders typically use IDE to load -the guest kernel, which then unplugs the IDE and continues with the Xen PV block -device. - -This configuration can be achieved as follows - -.. parsed-literal:: - - |qemu_system| -M pc --accel kvm,xen-version=0x4000a,kernel-irqchip=split \\ - -drive file=${GUEST_IMAGE},if=none,id=disk,file.locking=off -device xen-disk,drive=disk,vdev=xvda \\ - -drive file=${GUEST_IMAGE},index=2,media=disk,file.locking=off,if=ide - -It is necessary to use the pc machine type, as the q35 machine uses AHCI instead -of legacy IDE, and AHCI disks are not unplugged through the Xen PV unplug -mechanism. - -VirtIO devices can also be used; Linux guests may need to be dissuaded from -umplugging them by adding 'xen_emul_unplug=never' on their command line. 
- Properties ---------- @@ -63,7 +41,10 @@ The following properties exist on the KVM accelerator object: ``xen-version`` This property contains the Xen version in ``XENVER_version`` form, with the major version in the top 16 bits and the minor version in the low 16 bits. - Setting this property enables the Xen guest support. + Setting this property enables the Xen guest support. If Xen version 4.5 or + greater is specified, the HVM leaf in Xen CPUID is populated. Xen version + 4.6 enables the vCPU ID in CPUID, and version 4.17 advertises vCPU upcall + vector support to the guest. ``xen-evtchn-max-pirq`` Xen PIRQs represent an emulated physical interrupt, either GSI or MSI, which @@ -83,8 +64,78 @@ The following properties exist on the KVM accelerator object: through simultaneous grants. For guests with large numbers of PV devices and high throughput, it may be desirable to increase this value. -OS requirements ---------------- +Xen paravirtual devices +----------------------- + +The Xen PCI platform device is enabled automatically for a Xen guest. This +allows a guest to unplug all emulated devices, in order to use paravirtual +block and network drivers instead. + +Those paravirtual Xen block, network (and console) devices can be created +through the command line, and/or hot-plugged. + +To provide a Xen console device, define a character device and then a device +of type ``xen-console`` to connect to it. For the Xen console equivalent of +the handy ``-serial mon:stdio`` option, for example: + +.. parsed-literal:: + -chardev stdio,mux=on,id=char0,signal=off -mon char0 \\ + -device xen-console,chardev=char0 + +The Xen network device is ``xen-net-device``, which becomes the default NIC +model for emulated Xen guests, meaning that just the default NIC provided +by QEMU should automatically work and present a Xen network device to the +guest. + +Disks can be configured with '``-drive file=${GUEST_IMAGE},if=xen``' and will +appear to the guest as ``xvda`` onwards. + +Under Xen, the boot disk is typically available both via IDE emulation, and +as a PV block device. Guest bootloaders typically use IDE to load the guest +kernel, which then unplugs the IDE and continues with the Xen PV block device. + +This configuration can be achieved as follows: + +.. parsed-literal:: + + |qemu_system| --accel kvm,xen-version=0x40011,kernel-irqchip=split \\ + -drive file=${GUEST_IMAGE},if=xen \\ + -drive file=${GUEST_IMAGE},file.locking=off,if=ide + +VirtIO devices can also be used; Linux guests may need to be dissuaded from +unplugging them by adding '``xen_emul_unplug=never``' on their command line. + +Booting Xen PV guests +--------------------- + +Booting PV guest kernels is possible by using the Xen PV shim (a version of Xen +itself, designed to run inside a Xen HVM guest and provide memory management +services for one guest alone). + +The Xen binary is provided as the ``-kernel`` and the guest kernel itself (or +PV Grub image) as the ``-initrd`` image, which actually just means the first +multiboot "module". For example: + + ..
parsed-literal:: + + |qemu_system| --accel kvm,xen-version=0x40011,kernel-irqchip=split \\ + -chardev stdio,id=char0 -device xen-console,chardev=char0 \\ + -display none -m 1G -kernel xen -initrd bzImage \\ + -append "pv-shim console=xen,pv -- console=hvc0 root=/dev/xvda1" \\ + -drive file=${GUEST_IMAGE},if=xen + +The Xen image must be built with the ``CONFIG_XEN_GUEST`` and ``CONFIG_PV_SHIM`` +options, and as of Xen 4.17, Xen's PV shim mode does not support using a serial +port; it must have a Xen console or it will panic. + +The example above provides the guest kernel command line after a separator +(" ``--`` ") on the Xen command line, and does not provide the guest kernel +with an actual initramfs, which would need to be listed as a second multiboot +module. For more complicated alternatives, see the command line +documentation for the ``-initrd`` option. + +Host OS requirements +-------------------- The minimal Xen support in the KVM accelerator requires the host to be running Linux v5.12 or newer. Later versions add optimisations: Linux v5.17 added diff --git a/dump/dump.c b/dump/dump.c index 1c304cadfd..ad5294e853 100644 --- a/dump/dump.c +++ b/dump/dump.c @@ -2160,6 +2160,7 @@ void qmp_dump_guest_memory(bool paging, const char *protocol, return; } if (kdump_raw && lseek(fd, 0, SEEK_CUR) == (off_t) -1) { + close(fd); error_setg(errp, "kdump-raw formats require a seekable file"); return; } diff --git a/hmp-commands.hx b/hmp-commands.hx index c0a27688b6..765349ed14 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -252,6 +252,7 @@ SRST ERST +#ifdef CONFIG_PIXMAN { .name = "screendump", .args_type = "filename:F,format:-fs,device:s?,head:i?", @@ -267,6 +268,7 @@ SRST ``screendump`` *filename* Save screen into PPM image *filename*. ERST +#endif { .name = "logfile", diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index e35007ed41..3ada335a24 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -450,7 +450,7 @@ config STM32F405_SOC config XLNX_ZYNQMP_ARM bool - default y + default y if PIXMAN depends on TCG && AARCH64 select AHCI select ARM_GIC @@ -463,6 +463,7 @@ config XLNX_ZYNQMP_ARM select XILINX_AXI select XILINX_SPIPS select XLNX_CSU_DMA + select XLNX_DISPLAYPORT select XLNX_ZYNQMP select XLNX_ZDMA select USB_DWC3 @@ -483,12 +484,14 @@ config XLNX_VERSAL select XLNX_EFUSE_VERSAL select XLNX_USB_SUBSYS select XLNX_VERSAL_TRNG + select XLNX_CSU_DMA config NPCM7XX bool default y depends on TCG && ARM select A9MPCORE + select ADM1266 select ADM1272 select ARM_GIC select SMBUS diff --git a/hw/audio/Kconfig b/hw/audio/Kconfig index d0993514a1..daf060e1be 100644 --- a/hw/audio/Kconfig +++ b/hw/audio/Kconfig @@ -50,3 +50,8 @@ config CS4231 config ASC bool + +config VIRTIO_SND + bool + default y + depends on VIRTIO diff --git a/hw/audio/meson.build b/hw/audio/meson.build index 8805322f5c..2990974449 100644 --- a/hw/audio/meson.build +++ b/hw/audio/meson.build @@ -13,3 +13,5 @@ system_ss.add(when: 'CONFIG_PL041', if_true: files('pl041.c', 'lm4549.c')) system_ss.add(when: 'CONFIG_SB16', if_true: files('sb16.c')) system_ss.add(when: 'CONFIG_VT82C686', if_true: files('via-ac97.c')) system_ss.add(when: 'CONFIG_WM8750', if_true: files('wm8750.c')) +system_ss.add(when: ['CONFIG_VIRTIO_SND', 'CONFIG_VIRTIO'], if_true: files('virtio-snd.c')) +system_ss.add(when: ['CONFIG_VIRTIO_SND', 'CONFIG_VIRTIO', 'CONFIG_VIRTIO_PCI'], if_true: files('virtio-snd-pci.c')) diff --git a/hw/audio/trace-events b/hw/audio/trace-events index 059ce451f5..b1870ff224 100644 --- a/hw/audio/trace-events +++
b/hw/audio/trace-events @@ -38,3 +38,23 @@ asc_write_fifo(const char fifo, int reg, unsigned size, int wrptr, int cnt, uint asc_write_reg(int reg, unsigned size, uint64_t value) "reg=0x%03x size=%u value=0x%"PRIx64 asc_write_extreg(const char fifo, int reg, unsigned size, uint64_t value) "fifo %c reg=0x%03x size=%u value=0x%"PRIx64 asc_update_irq(int irq, int a, int b) "set IRQ to %d (A: 0x%x B: 0x%x)" + +#virtio-snd.c +virtio_snd_get_config(void *vdev, uint32_t jacks, uint32_t streams, uint32_t chmaps) "snd %p: get_config jacks=%"PRIu32" streams=%"PRIu32" chmaps=%"PRIu32"" +virtio_snd_set_config(void *vdev, uint32_t jacks, uint32_t new_jacks, uint32_t streams, uint32_t new_streams, uint32_t chmaps, uint32_t new_chmaps) "snd %p: set_config jacks from %"PRIu32"->%"PRIu32", streams from %"PRIu32"->%"PRIu32", chmaps from %"PRIu32"->%"PRIu32 +virtio_snd_get_features(void *vdev, uint64_t features) "snd %p: get_features 0x%"PRIx64 +virtio_snd_vm_state_running(void) "vm state running" +virtio_snd_vm_state_stopped(void) "vm state stopped" +virtio_snd_realize(void *snd) "snd %p: realize" +virtio_snd_unrealize(void *snd) "snd %p: unrealize" +virtio_snd_handle_pcm_set_params(uint32_t stream) "VIRTIO_SND_PCM_SET_PARAMS called for stream %"PRIu32 +virtio_snd_handle_ctrl(void *vdev, void *vq) "snd %p: handle ctrl event for queue %p" +virtio_snd_handle_pcm_info(uint32_t stream) "VIRTIO_SND_R_PCM_INFO called for stream %"PRIu32 +virtio_snd_handle_pcm_start_stop(const char *code, uint32_t stream) "%s called for stream %"PRIu32 +virtio_snd_handle_pcm_release(uint32_t stream) "VIRTIO_SND_PCM_RELEASE called for stream %"PRIu32 +virtio_snd_handle_code(uint32_t val, const char *code) "ctrl code msg val = %"PRIu32" == %s" +virtio_snd_handle_chmap_info(void) "VIRTIO_SND_CHMAP_INFO called" +virtio_snd_handle_event(void) "event queue callback called" +virtio_snd_pcm_stream_flush(uint32_t stream) "flushing stream %"PRIu32 +virtio_snd_handle_tx_xfer(void) "tx queue callback called" +virtio_snd_handle_rx_xfer(void) "rx queue callback called" diff --git a/hw/audio/virtio-snd-pci.c b/hw/audio/virtio-snd-pci.c new file mode 100644 index 0000000000..0f92e0752b --- /dev/null +++ b/hw/audio/virtio-snd-pci.c @@ -0,0 +1,93 @@ +/* + * VIRTIO Sound Device PCI Bindings + * + * Copyright (c) 2023 Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org> + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "qom/object.h" +#include "qapi/error.h" +#include "hw/audio/soundhw.h" +#include "hw/virtio/virtio-pci.h" +#include "hw/audio/virtio-snd.h" + +/* + * virtio-snd-pci: This extends VirtioPCIProxy. 
+ */ +#define TYPE_VIRTIO_SND_PCI "virtio-sound-pci" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSoundPCI, VIRTIO_SND_PCI) + +struct VirtIOSoundPCI { + VirtIOPCIProxy parent_obj; + + VirtIOSound vdev; +}; + +static Property virtio_snd_pci_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOSoundPCI *dev = VIRTIO_SND_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + virtio_pci_force_virtio_1(vpci_dev); + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_snd_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); + + device_class_set_props(dc, virtio_snd_pci_properties); + dc->desc = "Virtio Sound"; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + + vpciklass->realize = virtio_snd_pci_realize; +} + +static void virtio_snd_pci_instance_init(Object *obj) +{ + VirtIOSoundPCI *dev = VIRTIO_SND_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_SND); +} + +static const VirtioPCIDeviceTypeInfo virtio_snd_pci_info = { + .generic_name = TYPE_VIRTIO_SND_PCI, + .instance_size = sizeof(VirtIOSoundPCI), + .instance_init = virtio_snd_pci_instance_init, + .class_init = virtio_snd_pci_class_init, +}; + +/* Create a Virtio Sound PCI device, so '-audio driver,model=virtio' works. */ +static int virtio_snd_pci_init(PCIBus *bus, const char *audiodev) +{ + DeviceState *vdev = NULL; + VirtIOSoundPCI *dev = NULL; + + vdev = qdev_new(TYPE_VIRTIO_SND_PCI); + assert(vdev); + dev = VIRTIO_SND_PCI(vdev); + qdev_prop_set_string(DEVICE(&dev->vdev), "audiodev", audiodev); + qdev_realize_and_unref(vdev, BUS(bus), &error_fatal); + return 0; +} + +static void virtio_snd_pci_register(void) +{ + virtio_pci_types_register(&virtio_snd_pci_info); + pci_register_soundhw("virtio", "Virtio Sound", virtio_snd_pci_init); +} + +type_init(virtio_snd_pci_register); diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c new file mode 100644 index 0000000000..a18a9949a7 --- /dev/null +++ b/hw/audio/virtio-snd.c @@ -0,0 +1,1409 @@ +/* + * VIRTIO Sound Device conforming to + * + * "Virtual I/O Device (VIRTIO) Version 1.2 + * Committee Specification Draft 01 + * 09 May 2022" + * + * <https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-52900014> + * + * Copyright (c) 2023 Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org> + * Copyright (C) 2019 OpenSynergy GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "include/qemu/lockable.h" +#include "sysemu/runstate.h" +#include "trace.h" +#include "qapi/error.h" +#include "hw/audio/virtio-snd.h" +#include "hw/core/cpu.h" + +#define VIRTIO_SOUND_VM_VERSION 1 +#define VIRTIO_SOUND_JACK_DEFAULT 0 +#define VIRTIO_SOUND_STREAM_DEFAULT 2 +#define VIRTIO_SOUND_CHMAP_DEFAULT 0 +#define VIRTIO_SOUND_HDA_FN_NID 0 + +static void virtio_snd_pcm_out_cb(void *data, int available); +static void virtio_snd_process_cmdq(VirtIOSound *s); +static void virtio_snd_pcm_flush(VirtIOSoundPCMStream *stream); +static void virtio_snd_pcm_in_cb(void *data, int available); + +static uint32_t supported_formats = BIT(VIRTIO_SND_PCM_FMT_S8) + | BIT(VIRTIO_SND_PCM_FMT_U8) + | BIT(VIRTIO_SND_PCM_FMT_S16) + | BIT(VIRTIO_SND_PCM_FMT_U16) + | BIT(VIRTIO_SND_PCM_FMT_S32) + | BIT(VIRTIO_SND_PCM_FMT_U32) + | BIT(VIRTIO_SND_PCM_FMT_FLOAT); + +static uint32_t supported_rates = BIT(VIRTIO_SND_PCM_RATE_5512) + | BIT(VIRTIO_SND_PCM_RATE_8000) + | BIT(VIRTIO_SND_PCM_RATE_11025) + | BIT(VIRTIO_SND_PCM_RATE_16000) + | BIT(VIRTIO_SND_PCM_RATE_22050) + | BIT(VIRTIO_SND_PCM_RATE_32000) + | BIT(VIRTIO_SND_PCM_RATE_44100) + | BIT(VIRTIO_SND_PCM_RATE_48000) + | BIT(VIRTIO_SND_PCM_RATE_64000) + | BIT(VIRTIO_SND_PCM_RATE_88200) + | BIT(VIRTIO_SND_PCM_RATE_96000) + | BIT(VIRTIO_SND_PCM_RATE_176400) + | BIT(VIRTIO_SND_PCM_RATE_192000) + | BIT(VIRTIO_SND_PCM_RATE_384000); + +static const VMStateDescription vmstate_virtio_snd_device = { + .name = TYPE_VIRTIO_SND, + .version_id = VIRTIO_SOUND_VM_VERSION, + .minimum_version_id = VIRTIO_SOUND_VM_VERSION, +}; + +static const VMStateDescription vmstate_virtio_snd = { + .name = TYPE_VIRTIO_SND, + .minimum_version_id = VIRTIO_SOUND_VM_VERSION, + .version_id = VIRTIO_SOUND_VM_VERSION, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_snd_properties[] = { + DEFINE_AUDIO_PROPERTIES(VirtIOSound, card), + DEFINE_PROP_UINT32("jacks", VirtIOSound, snd_conf.jacks, + VIRTIO_SOUND_JACK_DEFAULT), + DEFINE_PROP_UINT32("streams", VirtIOSound, snd_conf.streams, + VIRTIO_SOUND_STREAM_DEFAULT), + DEFINE_PROP_UINT32("chmaps", VirtIOSound, snd_conf.chmaps, + VIRTIO_SOUND_CHMAP_DEFAULT), + DEFINE_PROP_END_OF_LIST(), +}; + +static void +virtio_snd_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOSound *s = VIRTIO_SND(vdev); + virtio_snd_config *sndconfig = + (virtio_snd_config *)config; + trace_virtio_snd_get_config(vdev, + s->snd_conf.jacks, + s->snd_conf.streams, + s->snd_conf.chmaps); + + memcpy(sndconfig, &s->snd_conf, sizeof(s->snd_conf)); + cpu_to_le32s(&sndconfig->jacks); + cpu_to_le32s(&sndconfig->streams); + cpu_to_le32s(&sndconfig->chmaps); + +} + +static void +virtio_snd_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VirtIOSound *s = VIRTIO_SND(vdev); + const virtio_snd_config *sndconfig = + (const virtio_snd_config *)config; + + + trace_virtio_snd_set_config(vdev, + s->snd_conf.jacks, + sndconfig->jacks, + s->snd_conf.streams, + sndconfig->streams, + s->snd_conf.chmaps, + sndconfig->chmaps); + + memcpy(&s->snd_conf, sndconfig, sizeof(virtio_snd_config)); + le32_to_cpus(&s->snd_conf.jacks); + le32_to_cpus(&s->snd_conf.streams); + le32_to_cpus(&s->snd_conf.chmaps); + +} + +static void +virtio_snd_pcm_buffer_free(VirtIOSoundPCMBuffer *buffer) +{ + g_free(buffer->elem); + g_free(buffer); +} + +static void +virtio_snd_ctrl_cmd_free(virtio_snd_ctrl_command *cmd) +{ + 
g_free(cmd->elem); + g_free(cmd); +} + +/* + * Get a specific stream from the virtio sound card device. + * Returns NULL if @stream_id is invalid or not allocated. + * + * @s: VirtIOSound device + * @stream_id: stream id + */ +static VirtIOSoundPCMStream *virtio_snd_pcm_get_stream(VirtIOSound *s, + uint32_t stream_id) +{ + return stream_id >= s->snd_conf.streams ? NULL : + s->pcm->streams[stream_id]; +} + +/* + * Get params for a specific stream. + * + * @s: VirtIOSound device + * @stream_id: stream id + */ +static virtio_snd_pcm_set_params *virtio_snd_pcm_get_params(VirtIOSound *s, + uint32_t stream_id) +{ + return stream_id >= s->snd_conf.streams ? NULL + : &s->pcm->pcm_params[stream_id]; +} + +/* + * Handle the VIRTIO_SND_R_PCM_INFO request. + * The function writes the info structs to the request element. + * + * @s: VirtIOSound device + * @cmd: The request command queue element from VirtIOSound cmdq field + */ +static void virtio_snd_handle_pcm_info(VirtIOSound *s, + virtio_snd_ctrl_command *cmd) +{ + uint32_t stream_id, start_id, count, size; + virtio_snd_pcm_info val; + virtio_snd_query_info req; + VirtIOSoundPCMStream *stream = NULL; + g_autofree virtio_snd_pcm_info *pcm_info = NULL; + size_t msg_sz = iov_to_buf(cmd->elem->out_sg, + cmd->elem->out_num, + 0, + &req, + sizeof(virtio_snd_query_info)); + + if (msg_sz != sizeof(virtio_snd_query_info)) { + /* + * TODO: do we need to set DEVICE_NEEDS_RESET? + */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: virtio-snd command size incorrect %zu vs \ + %zu\n", __func__, msg_sz, sizeof(virtio_snd_query_info)); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + return; + } + + start_id = le32_to_cpu(req.start_id); + count = le32_to_cpu(req.count); + size = le32_to_cpu(req.size); + + if (iov_size(cmd->elem->in_sg, cmd->elem->in_num) < + sizeof(virtio_snd_hdr) + size * count) { + /* + * TODO: do we need to set DEVICE_NEEDS_RESET? + */ + error_report("pcm info: buffer too small, got: %zu, needed: %zu", + iov_size(cmd->elem->in_sg, cmd->elem->in_num), + sizeof(virtio_snd_pcm_info)); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + return; + } + + pcm_info = g_new0(virtio_snd_pcm_info, count); + for (uint32_t i = 0; i < count; i++) { + stream_id = i + start_id; + trace_virtio_snd_handle_pcm_info(stream_id); + stream = virtio_snd_pcm_get_stream(s, stream_id); + if (!stream) { + error_report("Invalid stream id: %"PRIu32, stream_id); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + return; + } + val = stream->info; + val.hdr.hda_fn_nid = cpu_to_le32(val.hdr.hda_fn_nid); + val.features = cpu_to_le32(val.features); + val.formats = cpu_to_le64(val.formats); + val.rates = cpu_to_le64(val.rates); + /* + * 5.14.6.6.2.1 Device Requirements: Stream Information The device MUST + * NOT set undefined feature, format, rate and direction values. The + * device MUST initialize the padding bytes to 0. + */ + pcm_info[i] = val; + memset(&pcm_info[i].padding, 0, 5); + } + + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK); + iov_from_buf(cmd->elem->in_sg, + cmd->elem->in_num, + sizeof(virtio_snd_hdr), + pcm_info, + sizeof(virtio_snd_pcm_info) * count); +} + +/* + * Set the given stream params. + * Called by both virtio_snd_handle_pcm_set_params and during device + * initialization. + * Returns the response status code. (VIRTIO_SND_S_*). 
+ * + * @s: VirtIOSound device + * @params: The PCM params as defined in the virtio specification + */ +static +uint32_t virtio_snd_set_pcm_params(VirtIOSound *s, + uint32_t stream_id, + virtio_snd_pcm_set_params *params) +{ + virtio_snd_pcm_set_params *st_params; + + if (stream_id >= s->snd_conf.streams || s->pcm->pcm_params == NULL) { + /* + * TODO: do we need to set DEVICE_NEEDS_RESET? + */ + virtio_error(VIRTIO_DEVICE(s), "Streams have not been initialized.\n"); + return cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + } + + st_params = virtio_snd_pcm_get_params(s, stream_id); + + if (params->channels < 1 || params->channels > AUDIO_MAX_CHANNELS) { + error_report("Number of channels is not supported."); + return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP); + } + if (!(supported_formats & BIT(params->format))) { + error_report("Stream format is not supported."); + return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP); + } + if (!(supported_rates & BIT(params->rate))) { + error_report("Stream rate is not supported."); + return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP); + } + + st_params->buffer_bytes = le32_to_cpu(params->buffer_bytes); + st_params->period_bytes = le32_to_cpu(params->period_bytes); + st_params->features = le32_to_cpu(params->features); + /* the following are uint8_t, so there's no need to bswap the values. */ + st_params->channels = params->channels; + st_params->format = params->format; + st_params->rate = params->rate; + + return cpu_to_le32(VIRTIO_SND_S_OK); +} + +/* + * Handles the VIRTIO_SND_R_PCM_SET_PARAMS request. + * + * @s: VirtIOSound device + * @cmd: The request command queue element from VirtIOSound cmdq field + */ +static void virtio_snd_handle_pcm_set_params(VirtIOSound *s, + virtio_snd_ctrl_command *cmd) +{ + virtio_snd_pcm_set_params req = { 0 }; + uint32_t stream_id; + size_t msg_sz = iov_to_buf(cmd->elem->out_sg, + cmd->elem->out_num, + 0, + &req, + sizeof(virtio_snd_pcm_set_params)); + + if (msg_sz != sizeof(virtio_snd_pcm_set_params)) { + /* + * TODO: do we need to set DEVICE_NEEDS_RESET? + */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: virtio-snd command size incorrect %zu vs \ + %zu\n", __func__, msg_sz, sizeof(virtio_snd_pcm_set_params)); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + return; + } + stream_id = le32_to_cpu(req.hdr.stream_id); + trace_virtio_snd_handle_pcm_set_params(stream_id); + cmd->resp.code = virtio_snd_set_pcm_params(s, stream_id, &req); +} + +/* + * Get a QEMU Audiosystem compatible format value from a VIRTIO_SND_PCM_FMT_* + */ +static AudioFormat virtio_snd_get_qemu_format(uint32_t format) +{ + #define CASE(FMT) \ + case VIRTIO_SND_PCM_FMT_##FMT: \ + return AUDIO_FORMAT_##FMT; + + switch (format) { + CASE(U8) + CASE(S8) + CASE(U16) + CASE(S16) + CASE(U32) + CASE(S32) + case VIRTIO_SND_PCM_FMT_FLOAT: + return AUDIO_FORMAT_F32; + default: + g_assert_not_reached(); + } + + #undef CASE +} + +/* + * Get a QEMU Audiosystem compatible frequency value from a + * VIRTIO_SND_PCM_RATE_* + */ +static uint32_t virtio_snd_get_qemu_freq(uint32_t rate) +{ + #define CASE(RATE) \ + case VIRTIO_SND_PCM_RATE_##RATE: \ + return RATE; + + switch (rate) { + CASE(5512) + CASE(8000) + CASE(11025) + CASE(16000) + CASE(22050) + CASE(32000) + CASE(44100) + CASE(48000) + CASE(64000) + CASE(88200) + CASE(96000) + CASE(176400) + CASE(192000) + CASE(384000) + default: + g_assert_not_reached(); + } + + #undef CASE +} + +/* + * Get QEMU Audiosystem compatible audsettings from virtio based pcm stream + * params. 
+ */ +static void virtio_snd_get_qemu_audsettings(audsettings *as, + virtio_snd_pcm_set_params *params) +{ + as->nchannels = MIN(AUDIO_MAX_CHANNELS, params->channels); + as->fmt = virtio_snd_get_qemu_format(params->format); + as->freq = virtio_snd_get_qemu_freq(params->rate); + as->endianness = target_words_bigendian() ? 1 : 0; +} + +/* + * Close a stream and free all its resources. + * + * @stream: VirtIOSoundPCMStream *stream + */ +static void virtio_snd_pcm_close(VirtIOSoundPCMStream *stream) +{ + if (stream) { + virtio_snd_pcm_flush(stream); + if (stream->info.direction == VIRTIO_SND_D_OUTPUT) { + AUD_close_out(&stream->pcm->snd->card, stream->voice.out); + stream->voice.out = NULL; + } else if (stream->info.direction == VIRTIO_SND_D_INPUT) { + AUD_close_in(&stream->pcm->snd->card, stream->voice.in); + stream->voice.in = NULL; + } + } +} + +/* + * Prepares a VirtIOSound card stream. + * Returns the response status code. (VIRTIO_SND_S_*). + * + * @s: VirtIOSound device + * @stream_id: stream id + */ +static uint32_t virtio_snd_pcm_prepare(VirtIOSound *s, uint32_t stream_id) +{ + audsettings as; + virtio_snd_pcm_set_params *params; + VirtIOSoundPCMStream *stream; + + if (s->pcm->streams == NULL || + s->pcm->pcm_params == NULL || + stream_id >= s->snd_conf.streams) { + return cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + } + + params = virtio_snd_pcm_get_params(s, stream_id); + if (params == NULL) { + return cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + } + + stream = virtio_snd_pcm_get_stream(s, stream_id); + if (stream == NULL) { + stream = g_new0(VirtIOSoundPCMStream, 1); + stream->active = false; + stream->id = stream_id; + stream->pcm = s->pcm; + stream->s = s; + qemu_mutex_init(&stream->queue_mutex); + QSIMPLEQ_INIT(&stream->queue); + QSIMPLEQ_INIT(&stream->invalid); + + /* + * stream_id >= s->snd_conf.streams was checked before so this is + * in-bounds + */ + s->pcm->streams[stream_id] = stream; + } + + virtio_snd_get_qemu_audsettings(&as, params); + stream->info.direction = stream_id < s->snd_conf.streams / 2 + + (s->snd_conf.streams & 1) ? VIRTIO_SND_D_OUTPUT : VIRTIO_SND_D_INPUT; + stream->info.hdr.hda_fn_nid = VIRTIO_SOUND_HDA_FN_NID; + stream->info.features = 0; + stream->info.channels_min = 1; + stream->info.channels_max = as.nchannels; + stream->info.formats = supported_formats; + stream->info.rates = supported_rates; + stream->params = *params; + + stream->positions[0] = VIRTIO_SND_CHMAP_FL; + stream->positions[1] = VIRTIO_SND_CHMAP_FR; + stream->as = as; + + if (stream->info.direction == VIRTIO_SND_D_OUTPUT) { + stream->voice.out = AUD_open_out(&s->card, + stream->voice.out, + "virtio-sound.out", + stream, + virtio_snd_pcm_out_cb, + &as); + AUD_set_volume_out(stream->voice.out, 0, 255, 255); + } else { + stream->voice.in = AUD_open_in(&s->card, + stream->voice.in, + "virtio-sound.in", + stream, + virtio_snd_pcm_in_cb, + &as); + AUD_set_volume_in(stream->voice.in, 0, 255, 255); + } + + return cpu_to_le32(VIRTIO_SND_S_OK); +} + +static const char *print_code(uint32_t code) +{ + #define CASE(CODE) \ + case VIRTIO_SND_R_##CODE: \ + return "VIRTIO_SND_R_"#CODE + + switch (code) { + CASE(JACK_INFO); + CASE(JACK_REMAP); + CASE(PCM_INFO); + CASE(PCM_SET_PARAMS); + CASE(PCM_PREPARE); + CASE(PCM_RELEASE); + CASE(PCM_START); + CASE(PCM_STOP); + CASE(CHMAP_INFO); + default: + return "invalid code"; + } + + #undef CASE +}; + +/* + * Handles VIRTIO_SND_R_PCM_PREPARE. 
+ * + * @s: VirtIOSound device + * @cmd: The request command queue element from VirtIOSound cmdq field + */ +static void virtio_snd_handle_pcm_prepare(VirtIOSound *s, + virtio_snd_ctrl_command *cmd) +{ + uint32_t stream_id; + size_t msg_sz = iov_to_buf(cmd->elem->out_sg, + cmd->elem->out_num, + sizeof(virtio_snd_hdr), + &stream_id, + sizeof(stream_id)); + + stream_id = le32_to_cpu(stream_id); + cmd->resp.code = msg_sz == sizeof(stream_id) + ? virtio_snd_pcm_prepare(s, stream_id) + : cpu_to_le32(VIRTIO_SND_S_BAD_MSG); +} + +/* + * Handles VIRTIO_SND_R_PCM_START. + * + * @s: VirtIOSound device + * @cmd: The request command queue element from VirtIOSound cmdq field + * @start: whether to start or stop the device + */ +static void virtio_snd_handle_pcm_start_stop(VirtIOSound *s, + virtio_snd_ctrl_command *cmd, + bool start) +{ + VirtIOSoundPCMStream *stream; + virtio_snd_pcm_hdr req; + uint32_t stream_id; + size_t msg_sz = iov_to_buf(cmd->elem->out_sg, + cmd->elem->out_num, + 0, + &req, + sizeof(virtio_snd_pcm_hdr)); + + if (msg_sz != sizeof(virtio_snd_pcm_hdr)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: virtio-snd command size incorrect %zu vs \ + %zu\n", __func__, msg_sz, sizeof(virtio_snd_pcm_hdr)); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + return; + } + + stream_id = le32_to_cpu(req.stream_id); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK); + trace_virtio_snd_handle_pcm_start_stop(start ? "VIRTIO_SND_R_PCM_START" : + "VIRTIO_SND_R_PCM_STOP", stream_id); + + stream = virtio_snd_pcm_get_stream(s, stream_id); + if (stream) { + WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) { + stream->active = start; + } + if (stream->info.direction == VIRTIO_SND_D_OUTPUT) { + AUD_set_active_out(stream->voice.out, start); + } else { + AUD_set_active_in(stream->voice.in, start); + } + } else { + error_report("Invalid stream id: %"PRIu32, stream_id); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + return; + } + stream->active = start; +} + +/* + * Returns the number of I/O messages that are being processed. + * + * @stream: VirtIOSoundPCMStream + */ +static size_t virtio_snd_pcm_get_io_msgs_count(VirtIOSoundPCMStream *stream) +{ + VirtIOSoundPCMBuffer *buffer, *next; + size_t count = 0; + + WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) { + QSIMPLEQ_FOREACH_SAFE(buffer, &stream->queue, entry, next) { + count += 1; + } + QSIMPLEQ_FOREACH_SAFE(buffer, &stream->invalid, entry, next) { + count += 1; + } + } + return count; +} + +/* + * Handles VIRTIO_SND_R_PCM_RELEASE. + * + * @s: VirtIOSound device + * @cmd: The request command queue element from VirtIOSound cmdq field + */ +static void virtio_snd_handle_pcm_release(VirtIOSound *s, + virtio_snd_ctrl_command *cmd) +{ + uint32_t stream_id; + VirtIOSoundPCMStream *stream; + size_t msg_sz = iov_to_buf(cmd->elem->out_sg, + cmd->elem->out_num, + sizeof(virtio_snd_hdr), + &stream_id, + sizeof(stream_id)); + + if (msg_sz != sizeof(stream_id)) { + /* + * TODO: do we need to set DEVICE_NEEDS_RESET? + */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: virtio-snd command size incorrect %zu vs \ + %zu\n", __func__, msg_sz, sizeof(stream_id)); + cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG); + return; + } + + stream_id = le32_to_cpu(stream_id); + trace_virtio_snd_handle_pcm_release(stream_id); + stream = virtio_snd_pcm_get_stream(s, stream_id); + if (stream == NULL) { + /* + * TODO: do we need to set DEVICE_NEEDS_RESET? 
+
+/*
+ * Handles VIRTIO_SND_R_PCM_RELEASE.
+ *
+ * @s: VirtIOSound device
+ * @cmd: The request command queue element from VirtIOSound cmdq field
+ */
+static void virtio_snd_handle_pcm_release(VirtIOSound *s,
+                                          virtio_snd_ctrl_command *cmd)
+{
+    uint32_t stream_id;
+    VirtIOSoundPCMStream *stream;
+    size_t msg_sz = iov_to_buf(cmd->elem->out_sg,
+                               cmd->elem->out_num,
+                               sizeof(virtio_snd_hdr),
+                               &stream_id,
+                               sizeof(stream_id));
+
+    if (msg_sz != sizeof(stream_id)) {
+        /*
+         * TODO: do we need to set DEVICE_NEEDS_RESET?
+         */
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: virtio-snd command size incorrect %zu vs %zu\n",
+                      __func__, msg_sz, sizeof(stream_id));
+        cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
+        return;
+    }
+
+    stream_id = le32_to_cpu(stream_id);
+    trace_virtio_snd_handle_pcm_release(stream_id);
+    stream = virtio_snd_pcm_get_stream(s, stream_id);
+    if (stream == NULL) {
+        /*
+         * TODO: do we need to set DEVICE_NEEDS_RESET?
+         */
+        error_report("already released stream %"PRIu32, stream_id);
+        virtio_error(VIRTIO_DEVICE(s),
+                     "already released stream %"PRIu32,
+                     stream_id);
+        cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
+        return;
+    }
+
+    if (virtio_snd_pcm_get_io_msgs_count(stream)) {
+        /*
+         * virtio-v1.2-csd01, 5.14.6.6.5.1,
+         * Device Requirements: Stream Release
+         *
+         * - The device MUST complete all pending I/O messages for the
+         *   specified stream ID.
+         * - The device MUST NOT complete the control request while there
+         *   are pending I/O messages for the specified stream ID.
+         */
+        trace_virtio_snd_pcm_stream_flush(stream_id);
+        virtio_snd_pcm_flush(stream);
+    }
+
+    cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK);
+}
+
+/*
+ * The actual processing done in virtio_snd_process_cmdq().
+ *
+ * @s: VirtIOSound device
+ * @cmd: control command request
+ */
+static inline void
+process_cmd(VirtIOSound *s, virtio_snd_ctrl_command *cmd)
+{
+    uint32_t code;
+    size_t msg_sz = iov_to_buf(cmd->elem->out_sg,
+                               cmd->elem->out_num,
+                               0,
+                               &cmd->ctrl,
+                               sizeof(virtio_snd_hdr));
+
+    if (msg_sz != sizeof(virtio_snd_hdr)) {
+        /*
+         * TODO: do we need to set DEVICE_NEEDS_RESET?
+         */
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: virtio-snd command size incorrect %zu vs %zu\n",
+                      __func__, msg_sz, sizeof(virtio_snd_hdr));
+        return;
+    }
+
+    code = le32_to_cpu(cmd->ctrl.code);
+
+    trace_virtio_snd_handle_code(code, print_code(code));
+
+    switch (code) {
+    case VIRTIO_SND_R_JACK_INFO:
+    case VIRTIO_SND_R_JACK_REMAP:
+        qemu_log_mask(LOG_UNIMP,
+                      "virtio_snd: jack functionality is unimplemented.\n");
+        cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
+        break;
+    case VIRTIO_SND_R_PCM_INFO:
+        virtio_snd_handle_pcm_info(s, cmd);
+        break;
+    case VIRTIO_SND_R_PCM_START:
+        virtio_snd_handle_pcm_start_stop(s, cmd, true);
+        break;
+    case VIRTIO_SND_R_PCM_STOP:
+        virtio_snd_handle_pcm_start_stop(s, cmd, false);
+        break;
+    case VIRTIO_SND_R_PCM_SET_PARAMS:
+        virtio_snd_handle_pcm_set_params(s, cmd);
+        break;
+    case VIRTIO_SND_R_PCM_PREPARE:
+        virtio_snd_handle_pcm_prepare(s, cmd);
+        break;
+    case VIRTIO_SND_R_PCM_RELEASE:
+        virtio_snd_handle_pcm_release(s, cmd);
+        break;
+    case VIRTIO_SND_R_CHMAP_INFO:
+        qemu_log_mask(LOG_UNIMP,
+                      "virtio_snd: chmap info functionality is unimplemented.\n");
+        trace_virtio_snd_handle_chmap_info();
+        cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
+        break;
+    default:
+        /* error */
+        error_report("virtio snd header not recognized: %"PRIu32, code);
+        cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
+    }
+
+    iov_from_buf(cmd->elem->in_sg,
+                 cmd->elem->in_num,
+                 0,
+                 &cmd->resp,
+                 sizeof(virtio_snd_hdr));
+    virtqueue_push(cmd->vq, cmd->elem, sizeof(virtio_snd_hdr));
+    virtio_notify(VIRTIO_DEVICE(s), cmd->vq);
+}
+
+/*
+ * Consume all elements in command queue.
+ *
+ * @s: VirtIOSound device
+ */
+static void virtio_snd_process_cmdq(VirtIOSound *s)
+{
+    virtio_snd_ctrl_command *cmd;
+
+    if (unlikely(qatomic_read(&s->processing_cmdq))) {
+        return;
+    }
+
+    WITH_QEMU_LOCK_GUARD(&s->cmdq_mutex) {
+        qatomic_set(&s->processing_cmdq, true);
+        while (!QTAILQ_EMPTY(&s->cmdq)) {
+            cmd = QTAILQ_FIRST(&s->cmdq);
+
+            /* process command */
+            process_cmd(s, cmd);
+
+            QTAILQ_REMOVE(&s->cmdq, cmd, next);
+
+            virtio_snd_ctrl_cmd_free(cmd);
+        }
+        qatomic_set(&s->processing_cmdq, false);
+    }
+}
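Aside (not part of the patch): processing_cmdq above is a cheap
re-entrancy guard. Draining the queue can trigger callbacks that call
back into virtio_snd_process_cmdq(), and the flag makes the nested call
a no-op. The shape of it in plain C11 atomics, names mine:

    #include <stdatomic.h>

    static atomic_bool processing;

    void process_cmdq(void)
    {
        if (atomic_load(&processing)) {
            return;                 /* someone is already draining */
        }
        atomic_store(&processing, true);
        /* ... pop and handle queued commands under a mutex ... */
        atomic_store(&processing, false);
    }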
+
+/*
+ * The control message handler. Pops elements from the control virtqueue,
+ * stores them in VirtIOSound's cmdq queue and finally calls
+ * virtio_snd_process_cmdq() for processing.
+ *
+ * @vdev: VirtIOSound device
+ * @vq: Control virtqueue
+ */
+static void virtio_snd_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOSound *s = VIRTIO_SND(vdev);
+    VirtQueueElement *elem;
+    virtio_snd_ctrl_command *cmd;
+
+    trace_virtio_snd_handle_ctrl(vdev, vq);
+
+    if (!virtio_queue_ready(vq)) {
+        return;
+    }
+
+    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+    while (elem) {
+        cmd = g_new0(virtio_snd_ctrl_command, 1);
+        cmd->elem = elem;
+        cmd->vq = vq;
+        cmd->resp.code = cpu_to_le32(VIRTIO_SND_S_OK);
+        QTAILQ_INSERT_TAIL(&s->cmdq, cmd, next);
+        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+    }
+
+    virtio_snd_process_cmdq(s);
+}
+
+/*
+ * The event virtqueue handler.
+ * Not implemented yet.
+ *
+ * @vdev: VirtIOSound device
+ * @vq: event vq
+ */
+static void virtio_snd_handle_event(VirtIODevice *vdev, VirtQueue *vq)
+{
+    qemu_log_mask(LOG_UNIMP, "virtio_snd: event queue is unimplemented.\n");
+    trace_virtio_snd_handle_event();
+}
+
+static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOSoundPCMBuffer *buffer = NULL;
+    VirtIOSoundPCMStream *stream = NULL;
+    virtio_snd_pcm_status resp = { 0 };
+    VirtIOSound *vsnd = VIRTIO_SND(vdev);
+    bool any = false;
+
+    for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) {
+        stream = vsnd->pcm->streams[i];
+        if (stream) {
+            any = false;
+            WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+                while (!QSIMPLEQ_EMPTY(&stream->invalid)) {
+                    buffer = QSIMPLEQ_FIRST(&stream->invalid);
+                    if (buffer->vq != vq) {
+                        break;
+                    }
+                    any = true;
+                    resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
+                    iov_from_buf(buffer->elem->in_sg,
+                                 buffer->elem->in_num,
+                                 0,
+                                 &resp,
+                                 sizeof(virtio_snd_pcm_status));
+                    virtqueue_push(vq,
+                                   buffer->elem,
+                                   sizeof(virtio_snd_pcm_status));
+                    QSIMPLEQ_REMOVE_HEAD(&stream->invalid, entry);
+                    virtio_snd_pcm_buffer_free(buffer);
+                }
+                if (any) {
+                    /*
+                     * Notify vq about virtio_snd_pcm_status responses.
+                     * Buffer responses must be notified separately later.
+                     */
+                    virtio_notify(vdev, vq);
+                }
+            }
+        }
+    }
+}
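Aside (not part of the patch): every I/O message is completed by writing
an 8-byte status trailer into the guest-writable part of the element, as
empty_invalid_queue() does above with VIRTIO_SND_S_BAD_MSG. Its layout
per my reading of the virtio-sound spec, modeled under my own struct
name:

    #include <stdint.h>

    struct pcm_status {             /* models virtio_snd_pcm_status */
        uint32_t status;            /* le32: VIRTIO_SND_S_OK, _BAD_MSG, ... */
        uint32_t latency_bytes;     /* le32 */
    };

    _Static_assert(sizeof(struct pcm_status) == 8, "no padding expected");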
+
+/*
+ * The tx virtqueue handler. Makes the buffers available to their respective
+ * streams for consumption.
+ *
+ * @vdev: VirtIOSound device
+ * @vq: tx virtqueue
+ */
+static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOSound *s = VIRTIO_SND(vdev);
+    VirtIOSoundPCMStream *stream = NULL;
+    VirtIOSoundPCMBuffer *buffer;
+    VirtQueueElement *elem;
+    size_t msg_sz, size;
+    virtio_snd_pcm_xfer hdr;
+    uint32_t stream_id;
+    /*
+     * If any of the I/O messages are invalid, put them in stream->invalid and
+     * return them after the for loop.
+     */
+    bool must_empty_invalid_queue = false;
+
+    if (!virtio_queue_ready(vq)) {
+        return;
+    }
+    trace_virtio_snd_handle_tx_xfer();
+
+    for (;;) {
+        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+        if (!elem) {
+            break;
+        }
+        /* get the message hdr object */
+        msg_sz = iov_to_buf(elem->out_sg,
+                            elem->out_num,
+                            0,
+                            &hdr,
+                            sizeof(virtio_snd_pcm_xfer));
+        if (msg_sz != sizeof(virtio_snd_pcm_xfer)) {
+            goto tx_err;
+        }
+        stream_id = le32_to_cpu(hdr.stream_id);
+
+        if (stream_id >= s->snd_conf.streams
+            || s->pcm->streams[stream_id] == NULL) {
+            goto tx_err;
+        }
+
+        stream = s->pcm->streams[stream_id];
+        if (stream->info.direction != VIRTIO_SND_D_OUTPUT) {
+            goto tx_err;
+        }
+
+        WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+            size = iov_size(elem->out_sg, elem->out_num) - msg_sz;
+
+            buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer) + size);
+            buffer->elem = elem;
+            buffer->populated = false;
+            buffer->vq = vq;
+            buffer->size = size;
+            buffer->offset = 0;
+
+            QSIMPLEQ_INSERT_TAIL(&stream->queue, buffer, entry);
+        }
+        continue;
+
+tx_err:
+        if (!stream) {
+            /*
+             * No stream has been looked up yet, so there is no invalid
+             * queue to park the message on; fail it immediately.
+             */
+            virtio_snd_pcm_status resp = { 0 };
+            resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
+            iov_from_buf(elem->in_sg, elem->in_num, 0,
+                         &resp, sizeof(virtio_snd_pcm_status));
+            virtqueue_push(vq, elem, sizeof(virtio_snd_pcm_status));
+            virtio_notify(vdev, vq);
+            g_free(elem);
+            continue;
+        }
+        WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+            must_empty_invalid_queue = true;
+            buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer));
+            buffer->elem = elem;
+            buffer->vq = vq;
+            QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry);
+        }
+    }
+
+    if (must_empty_invalid_queue) {
+        empty_invalid_queue(vdev, vq);
+    }
+}
+
+/*
+ * The rx virtqueue handler. Makes the buffers available to their respective
+ * streams for consumption.
+ *
+ * @vdev: VirtIOSound device
+ * @vq: rx virtqueue
+ */
+static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOSound *s = VIRTIO_SND(vdev);
+    VirtIOSoundPCMStream *stream = NULL;
+    VirtIOSoundPCMBuffer *buffer;
+    VirtQueueElement *elem;
+    size_t msg_sz, size;
+    virtio_snd_pcm_xfer hdr;
+    uint32_t stream_id;
+    /*
+     * if any of the I/O messages are invalid, put them in stream->invalid and
+     * return them after the for loop.
+     */
+    bool must_empty_invalid_queue = false;
+
+    if (!virtio_queue_ready(vq)) {
+        return;
+    }
+    trace_virtio_snd_handle_rx_xfer();
+
+    for (;;) {
+        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+        if (!elem) {
+            break;
+        }
+        /* get the message hdr object */
+        msg_sz = iov_to_buf(elem->out_sg,
+                            elem->out_num,
+                            0,
+                            &hdr,
+                            sizeof(virtio_snd_pcm_xfer));
+        if (msg_sz != sizeof(virtio_snd_pcm_xfer)) {
+            goto rx_err;
+        }
+        stream_id = le32_to_cpu(hdr.stream_id);
+
+        if (stream_id >= s->snd_conf.streams
+            || !s->pcm->streams[stream_id]) {
+            goto rx_err;
+        }
+
+        stream = s->pcm->streams[stream_id];
+        if (stream->info.direction != VIRTIO_SND_D_INPUT) {
+            goto rx_err;
+        }
+        WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+            size = iov_size(elem->in_sg, elem->in_num) -
+                   sizeof(virtio_snd_pcm_status);
+            buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer) + size);
+            buffer->elem = elem;
+            buffer->vq = vq;
+            buffer->size = 0;
+            buffer->offset = 0;
+            QSIMPLEQ_INSERT_TAIL(&stream->queue, buffer, entry);
+        }
+        continue;
+
+rx_err:
+        if (!stream) {
+            /* As in the tx handler: no stream yet, fail immediately. */
+            virtio_snd_pcm_status resp = { 0 };
+            resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
+            iov_from_buf(elem->in_sg, elem->in_num, 0,
+                         &resp, sizeof(virtio_snd_pcm_status));
+            virtqueue_push(vq, elem, sizeof(virtio_snd_pcm_status));
+            virtio_notify(vdev, vq);
+            g_free(elem);
+            continue;
+        }
+        WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+            must_empty_invalid_queue = true;
+            buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer));
+            buffer->elem = elem;
+            buffer->vq = vq;
+            QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry);
+        }
+    }
+
+    if (must_empty_invalid_queue) {
+        empty_invalid_queue(vdev, vq);
+    }
+}
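Aside (not part of the patch): note the sizing asymmetry above. For tx
the PCM payload follows the header in the guest-readable area, so
capacity is iov_size(out_sg) minus the header just consumed; for rx the
device fills the guest-writable area but must leave room for the status
trailer. A back-of-envelope check, with field sizes per my reading of
the spec (4-byte virtio_snd_pcm_xfer, 8-byte virtio_snd_pcm_status):

    #include <stdio.h>

    int main(void)
    {
        unsigned xfer_hdr = 4;     /* sizeof(virtio_snd_pcm_xfer)   */
        unsigned status   = 8;     /* sizeof(virtio_snd_pcm_status) */
        unsigned out_sg   = 2052;  /* example readable bytes (tx)   */
        unsigned in_sg    = 2056;  /* example writable bytes (rx)   */

        printf("tx payload capacity: %u\n", out_sg - xfer_hdr); /* 2048 */
        printf("rx payload capacity: %u\n", in_sg - status);    /* 2048 */
        return 0;
    }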
+
+static uint64_t get_features(VirtIODevice *vdev, uint64_t features,
+                             Error **errp)
+{
+    /*
+     * virtio-v1.2-csd01, 5.14.3,
+     * Feature Bits
+     * None currently defined.
+     */
+    VirtIOSound *s = VIRTIO_SND(vdev);
+    features |= s->features;
+
+    trace_virtio_snd_get_features(vdev, features);
+
+    return features;
+}
+
+static void
+virtio_snd_vm_state_change(void *opaque, bool running,
+                           RunState state)
+{
+    if (running) {
+        trace_virtio_snd_vm_state_running();
+    } else {
+        trace_virtio_snd_vm_state_stopped();
+    }
+}
+
+static void virtio_snd_realize(DeviceState *dev, Error **errp)
+{
+    ERRP_GUARD();
+    VirtIOSound *vsnd = VIRTIO_SND(dev);
+    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+    virtio_snd_pcm_set_params default_params = { 0 };
+    uint32_t status;
+
+    vsnd->pcm = NULL;
+    vsnd->vmstate =
+        qemu_add_vm_change_state_handler(virtio_snd_vm_state_change, vsnd);
+
+    trace_virtio_snd_realize(vsnd);
+
+    vsnd->pcm = g_new0(VirtIOSoundPCM, 1);
+    vsnd->pcm->snd = vsnd;
+    vsnd->pcm->streams =
+        g_new0(VirtIOSoundPCMStream *, vsnd->snd_conf.streams);
+    vsnd->pcm->pcm_params =
+        g_new0(virtio_snd_pcm_set_params, vsnd->snd_conf.streams);
+
+    virtio_init(vdev, VIRTIO_ID_SOUND, sizeof(virtio_snd_config));
+    virtio_add_feature(&vsnd->features, VIRTIO_F_VERSION_1);
+
+    /* set number of jacks and streams */
+    if (vsnd->snd_conf.jacks > 8) {
+        error_setg(errp,
+                   "Invalid number of jacks: %"PRIu32,
+                   vsnd->snd_conf.jacks);
+        return;
+    }
+    if (vsnd->snd_conf.streams < 1 || vsnd->snd_conf.streams > 10) {
+        error_setg(errp,
+                   "Invalid number of streams: %"PRIu32,
+                   vsnd->snd_conf.streams);
+        return;
+    }
+
+    if (vsnd->snd_conf.chmaps > VIRTIO_SND_CHMAP_MAX_SIZE) {
+        error_setg(errp,
+                   "Invalid number of channel maps: %"PRIu32,
+                   vsnd->snd_conf.chmaps);
+        return;
+    }
+
+    AUD_register_card("virtio-sound", &vsnd->card, errp);
+
+    /* set default params for all streams */
+    default_params.features = 0;
+    default_params.buffer_bytes = cpu_to_le32(8192);
+    default_params.period_bytes = cpu_to_le32(2048);
+    default_params.channels = 2;
+    default_params.format = VIRTIO_SND_PCM_FMT_S16;
+    default_params.rate = VIRTIO_SND_PCM_RATE_48000;
+    vsnd->queues[VIRTIO_SND_VQ_CONTROL] =
+        virtio_add_queue(vdev, 64, virtio_snd_handle_ctrl);
+    vsnd->queues[VIRTIO_SND_VQ_EVENT] =
+        virtio_add_queue(vdev, 64, virtio_snd_handle_event);
+    vsnd->queues[VIRTIO_SND_VQ_TX] =
+        virtio_add_queue(vdev, 64, virtio_snd_handle_tx_xfer);
+    vsnd->queues[VIRTIO_SND_VQ_RX] =
+        virtio_add_queue(vdev, 64, virtio_snd_handle_rx_xfer);
+    qemu_mutex_init(&vsnd->cmdq_mutex);
+    QTAILQ_INIT(&vsnd->cmdq);
+
+    for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) {
+        status = virtio_snd_set_pcm_params(vsnd, i, &default_params);
+        if (status != cpu_to_le32(VIRTIO_SND_S_OK)) {
+            error_setg(errp,
+                       "Can't initialize stream params, device responded with %s.",
+                       print_code(status));
+            return;
+        }
+        status = virtio_snd_pcm_prepare(vsnd, i);
+        if (status != cpu_to_le32(VIRTIO_SND_S_OK)) {
+            error_setg(errp,
+                       "Can't prepare streams, device responded with %s.",
+                       print_code(status));
+            return;
+        }
+    }
+}
+
+static inline void return_tx_buffer(VirtIOSoundPCMStream *stream,
+                                    VirtIOSoundPCMBuffer *buffer)
+{
+    virtio_snd_pcm_status resp = { 0 };
+    resp.status = cpu_to_le32(VIRTIO_SND_S_OK);
+    resp.latency_bytes = cpu_to_le32((uint32_t)buffer->size);
+    iov_from_buf(buffer->elem->in_sg,
+                 buffer->elem->in_num,
+                 0,
+                 &resp,
+                 sizeof(virtio_snd_pcm_status));
+    virtqueue_push(buffer->vq,
+                   buffer->elem,
+                   sizeof(virtio_snd_pcm_status));
+    virtio_notify(VIRTIO_DEVICE(stream->s), buffer->vq);
+    QSIMPLEQ_REMOVE(&stream->queue,
+                    buffer,
+                    VirtIOSoundPCMBuffer,
+                    entry);
+    virtio_snd_pcm_buffer_free(buffer);
+}
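Aside (not part of the patch): a quick sanity check of the defaults set
in realize above. S16 stereo at 48000 Hz is 48000 * 2 * 2 = 192000
bytes/s, so a 2048-byte period is about 10.7 ms of audio and the
8192-byte buffer holds exactly four periods:

    #include <stdio.h>

    int main(void)
    {
        const unsigned rate = 48000, channels = 2, bytes_per_sample = 2;
        const unsigned byte_rate = rate * channels * bytes_per_sample;

        printf("byte rate: %u B/s\n", byte_rate);                 /* 192000 */
        printf("period: %.2f ms\n", 2048.0 * 1000.0 / byte_rate); /* ~10.67 */
        printf("periods per buffer: %u\n", 8192u / 2048u);        /* 4 */
        return 0;
    }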
+
+/*
+ * AUD_* output callback.
+ *
+ * @data: VirtIOSoundPCMStream stream
+ * @available: number of bytes that can be written with AUD_write()
+ */
+static void virtio_snd_pcm_out_cb(void *data, int available)
+{
+    VirtIOSoundPCMStream *stream = data;
+    VirtIOSoundPCMBuffer *buffer;
+    size_t size;
+
+    WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+        while (!QSIMPLEQ_EMPTY(&stream->queue)) {
+            buffer = QSIMPLEQ_FIRST(&stream->queue);
+            if (!virtio_queue_ready(buffer->vq)) {
+                return;
+            }
+            if (!stream->active) {
+                /* Stream has stopped, so do not perform AUD_write. */
+                return_tx_buffer(stream, buffer);
+                continue;
+            }
+            if (!buffer->populated) {
+                iov_to_buf(buffer->elem->out_sg,
+                           buffer->elem->out_num,
+                           sizeof(virtio_snd_pcm_xfer),
+                           buffer->data,
+                           buffer->size);
+                buffer->populated = true;
+            }
+            for (;;) {
+                size = AUD_write(stream->voice.out,
+                                 buffer->data + buffer->offset,
+                                 MIN(buffer->size, available));
+                assert(size <= MIN(buffer->size, available));
+                if (size == 0) {
+                    /* break out of both loops */
+                    available = 0;
+                    break;
+                }
+                buffer->size -= size;
+                buffer->offset += size;
+                available -= size;
+                if (buffer->size < 1) {
+                    return_tx_buffer(stream, buffer);
+                    break;
+                }
+                if (!available) {
+                    break;
+                }
+            }
+            if (!available) {
+                break;
+            }
+        }
+    }
+}
+
+/*
+ * Return a used receive buffer to the driver, copying out whatever was
+ * captured into it and appending the status trailer.
+ *
+ * @stream: VirtIOSoundPCMStream *stream
+ */
+static inline void return_rx_buffer(VirtIOSoundPCMStream *stream,
+                                    VirtIOSoundPCMBuffer *buffer)
+{
+    virtio_snd_pcm_status resp = { 0 };
+    resp.status = cpu_to_le32(VIRTIO_SND_S_OK);
+    resp.latency_bytes = 0;
+    /* Copy data -if any- to guest */
+    iov_from_buf(buffer->elem->in_sg,
+                 buffer->elem->in_num,
+                 0,
+                 buffer->data,
+                 buffer->size);
+    iov_from_buf(buffer->elem->in_sg,
+                 buffer->elem->in_num,
+                 buffer->size,
+                 &resp,
+                 sizeof(virtio_snd_pcm_status));
+    virtqueue_push(buffer->vq,
+                   buffer->elem,
+                   sizeof(virtio_snd_pcm_status) + buffer->size);
+    virtio_notify(VIRTIO_DEVICE(stream->s), buffer->vq);
+    QSIMPLEQ_REMOVE(&stream->queue,
+                    buffer,
+                    VirtIOSoundPCMBuffer,
+                    entry);
+    virtio_snd_pcm_buffer_free(buffer);
+}
+
+
+/*
+ * AUD_* input callback.
+ *
+ * @data: VirtIOSoundPCMStream stream
+ * @available: number of bytes that can be read with AUD_read()
+ */
+static void virtio_snd_pcm_in_cb(void *data, int available)
+{
+    VirtIOSoundPCMStream *stream = data;
+    VirtIOSoundPCMBuffer *buffer;
+    size_t size;
+
+    WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+        while (!QSIMPLEQ_EMPTY(&stream->queue)) {
+            buffer = QSIMPLEQ_FIRST(&stream->queue);
+            if (!virtio_queue_ready(buffer->vq)) {
+                return;
+            }
+            if (!stream->active) {
+                /* Stream has stopped, so do not perform AUD_read. */
+                return_rx_buffer(stream, buffer);
+                continue;
+            }
+
+            for (;;) {
+                size = AUD_read(stream->voice.in,
+                                buffer->data + buffer->size,
+                                MIN(available, (stream->params.period_bytes -
+                                                buffer->size)));
+                if (!size) {
+                    available = 0;
+                    break;
+                }
+                buffer->size += size;
+                available -= size;
+                if (buffer->size >= stream->params.period_bytes) {
+                    return_rx_buffer(stream, buffer);
+                    break;
+                }
+                if (!available) {
+                    break;
+                }
+            }
+            if (!available) {
+                break;
+            }
+        }
+    }
+}
+
+/*
+ * Flush all buffer data from this stream's queue into the driver's
+ * virtual queue.
+ *
+ * @stream: VirtIOSoundPCMStream *stream
+ */
+static inline void virtio_snd_pcm_flush(VirtIOSoundPCMStream *stream)
+{
+    VirtIOSoundPCMBuffer *buffer;
+    void (*cb)(VirtIOSoundPCMStream *, VirtIOSoundPCMBuffer *) =
+        (stream->info.direction == VIRTIO_SND_D_OUTPUT) ? return_tx_buffer :
+                                                          return_rx_buffer;
+
+    WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
+        while (!QSIMPLEQ_EMPTY(&stream->queue)) {
+            buffer = QSIMPLEQ_FIRST(&stream->queue);
+            cb(stream, buffer);
+        }
+    }
+}
+
+static void virtio_snd_unrealize(DeviceState *dev)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+    VirtIOSound *vsnd = VIRTIO_SND(dev);
+    VirtIOSoundPCMStream *stream;
+
+    qemu_del_vm_change_state_handler(vsnd->vmstate);
+    trace_virtio_snd_unrealize(vsnd);
+
+    if (vsnd->pcm) {
+        if (vsnd->pcm->streams) {
+            for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) {
+                stream = vsnd->pcm->streams[i];
+                if (stream) {
+                    virtio_snd_process_cmdq(stream->s);
+                    virtio_snd_pcm_close(stream);
+                    qemu_mutex_destroy(&stream->queue_mutex);
+                    g_free(stream);
+                }
+            }
+            g_free(vsnd->pcm->streams);
+        }
+        g_free(vsnd->pcm->pcm_params);
+        g_free(vsnd->pcm);
+        vsnd->pcm = NULL;
+    }
+    AUD_remove_card(&vsnd->card);
+    qemu_mutex_destroy(&vsnd->cmdq_mutex);
+    virtio_delete_queue(vsnd->queues[VIRTIO_SND_VQ_CONTROL]);
+    virtio_delete_queue(vsnd->queues[VIRTIO_SND_VQ_EVENT]);
+    virtio_delete_queue(vsnd->queues[VIRTIO_SND_VQ_TX]);
+    virtio_delete_queue(vsnd->queues[VIRTIO_SND_VQ_RX]);
+    virtio_cleanup(vdev);
+}
+
+
+static void virtio_snd_reset(VirtIODevice *vdev)
+{
+    VirtIOSound *s = VIRTIO_SND(vdev);
+    virtio_snd_ctrl_command *cmd;
+
+    WITH_QEMU_LOCK_GUARD(&s->cmdq_mutex) {
+        while (!QTAILQ_EMPTY(&s->cmdq)) {
+            cmd = QTAILQ_FIRST(&s->cmdq);
+            QTAILQ_REMOVE(&s->cmdq, cmd, next);
+            virtio_snd_ctrl_cmd_free(cmd);
+        }
+    }
+}
+
+static void virtio_snd_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+
+    set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
+    device_class_set_props(dc, virtio_snd_properties);
+
+    dc->vmsd = &vmstate_virtio_snd;
+    vdc->vmsd = &vmstate_virtio_snd_device;
+    vdc->realize = virtio_snd_realize;
+    vdc->unrealize = virtio_snd_unrealize;
+    vdc->get_config = virtio_snd_get_config;
+    vdc->set_config = virtio_snd_set_config;
+    vdc->get_features = get_features;
+    vdc->reset = virtio_snd_reset;
+    vdc->legacy_features = 0;
+}
+
+static const TypeInfo virtio_snd_types[] = {
+    {
+        .name = TYPE_VIRTIO_SND,
+        .parent = TYPE_VIRTIO_DEVICE,
+        .instance_size = sizeof(VirtIOSound),
+        .class_init = virtio_snd_class_init,
+    }
+};
+
+DEFINE_TYPES(virtio_snd_types)
diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c
index bfa53960c3..6d64ede94f 100644
--- a/hw/block/xen-block.c
+++ b/hw/block/xen-block.c
@@ -27,13 +27,119 @@
 #include "sysemu/block-backend.h"
 #include "sysemu/iothread.h"
 #include "dataplane/xen-block.h"
+#include "hw/xen/interface/io/xs_wire.h"
 #include "trace.h"
 
+#define XVDA_MAJOR 202
+#define XVDQ_MAJOR (1 << 20)
+#define XVDBGQCV_MAJOR ((1 << 21) - 1)
+#define HDA_MAJOR 3
+#define HDC_MAJOR 22
+#define SDA_MAJOR 8
+
+
+static int vdev_to_diskno(unsigned int vdev_nr)
+{
+    switch (vdev_nr >> 8) {
+    case XVDA_MAJOR:
+    case SDA_MAJOR:
+        return (vdev_nr >> 4) & 0xf;
+
+    case HDA_MAJOR:
+        return (vdev_nr >> 6) & 1;
+
+    case HDC_MAJOR:
+        return ((vdev_nr >> 6) & 1) + 2;
+
+    case XVDQ_MAJOR ... XVDBGQCV_MAJOR:
+        return (vdev_nr >> 8) & 0xfffff;
+
+    default:
+        return -1;
+    }
+}
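Aside (not part of the patch): a round trip through the virtual block
device numbering that vdev_to_diskno() decodes. In the classic scheme
xvdb1 (disk 1, partition 1) encodes as (202 << 8) | (1 << 4) | 1; disks
16 and up use the extended scheme with the disk number in bits 27:8.

    #include <stdio.h>

    #define XVDA_MAJOR 202
    #define XVDQ_MAJOR (1 << 20)

    int main(void)
    {
        unsigned xvdb1 = (XVDA_MAJOR << 8) | (1 << 4) | 1;     /* 51729 */
        printf("xvdb1 = %u -> disk %u\n", xvdb1, (xvdb1 >> 4) & 0xf);

        unsigned big = (XVDQ_MAJOR << 8) | (100 << 8);         /* disk 100 */
        printf("vdev = %u -> disk %u\n", big, (big >> 8) & 0xfffff);
        return 0;
    }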
+
+#define MAX_AUTO_VDEV 4096
+
+/*
+ * Find a free device name in the xvda → xvdfan range and set it in
+ * blockdev->props.vdev. Our definition of "free" is that there must
+ * be no other disk or partition with the same disk number.
+ *
+ * You are technically permitted to have all of hda, hda1, sda, sda1,
+ * xvda and xvda1 as *separate* PV block devices with separate backing
+ * stores. That doesn't make it a good idea. This code will skip xvda
+ * if *any* of those "conflicting" devices already exists.
+ *
+ * The limit of xvdfan (disk 4095) is fairly arbitrary just to avoid a
+ * stupidly sized bitmap, but Linux as of v6.6 doesn't support anything
+ * higher than that anyway.
+ */
+static bool xen_block_find_free_vdev(XenBlockDevice *blockdev, Error **errp)
+{
+    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(blockdev)));
+    unsigned long used_devs[BITS_TO_LONGS(MAX_AUTO_VDEV)];
+    XenBlockVdev *vdev = &blockdev->props.vdev;
+    char fe_path[XENSTORE_ABS_PATH_MAX + 1];
+    char **existing_frontends;
+    unsigned int nr_existing = 0;
+    unsigned int vdev_nr;
+    int i, disk = 0;
+
+    snprintf(fe_path, sizeof(fe_path), "/local/domain/%u/device/vbd",
+             blockdev->xendev.frontend_id);
+
+    existing_frontends = qemu_xen_xs_directory(xenbus->xsh, XBT_NULL, fe_path,
+                                               &nr_existing);
+    if (!existing_frontends && errno != ENOENT) {
+        error_setg_errno(errp, errno, "cannot read %s", fe_path);
+        return false;
+    }
+
+    memset(used_devs, 0, sizeof(used_devs));
+    for (i = 0; i < nr_existing; i++) {
+        if (qemu_strtoui(existing_frontends[i], NULL, 10, &vdev_nr)) {
+            free(existing_frontends[i]);
+            continue;
+        }
+
+        free(existing_frontends[i]);
+
+        disk = vdev_to_diskno(vdev_nr);
+        if (disk < 0 || disk >= MAX_AUTO_VDEV) {
+            continue;
+        }
+
+        set_bit(disk, used_devs);
+    }
+    free(existing_frontends);
+
+    disk = find_first_zero_bit(used_devs, MAX_AUTO_VDEV);
+    if (disk == MAX_AUTO_VDEV) {
+        error_setg(errp, "cannot find device vdev for block device");
+        return false;
+    }
+
+    vdev->type = XEN_BLOCK_VDEV_TYPE_XVD;
+    vdev->partition = 0;
+    vdev->disk = disk;
+    if (disk < (1 << 4)) {
+        vdev->number = (XVDA_MAJOR << 8) | (disk << 4);
+    } else {
+        vdev->number = (XVDQ_MAJOR << 8) | (disk << 8);
+    }
+    return true;
+}
+
 static char *xen_block_get_name(XenDevice *xendev, Error **errp)
 {
     XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev);
     XenBlockVdev *vdev = &blockdev->props.vdev;
 
+    if (vdev->type == XEN_BLOCK_VDEV_TYPE_INVALID &&
+        !xen_block_find_free_vdev(blockdev, errp)) {
+        return NULL;
+    }
     return g_strdup_printf("%lu", vdev->number);
 }
 
@@ -482,10 +588,10 @@ static void xen_block_set_vdev(Object *obj, Visitor *v, const char *name,
     case XEN_BLOCK_VDEV_TYPE_DP:
     case XEN_BLOCK_VDEV_TYPE_XVD:
         if (vdev->disk < (1 << 4) && vdev->partition < (1 << 4)) {
-            vdev->number = (202 << 8) | (vdev->disk << 4) |
+            vdev->number = (XVDA_MAJOR << 8) | (vdev->disk << 4) |
                            vdev->partition;
         } else if (vdev->disk < (1 << 20) && vdev->partition < (1 << 8)) {
-            vdev->number = (1 << 28) | (vdev->disk << 8) |
+            vdev->number = (XVDQ_MAJOR << 8) | (vdev->disk << 8) |
                            vdev->partition;
         } else {
             goto invalid;
@@ -495,10 +601,11 @@ static void xen_block_set_vdev(Object *obj, Visitor *v, const char *name,
     case XEN_BLOCK_VDEV_TYPE_HD:
         if ((vdev->disk == 0 || vdev->disk == 1) &&
             vdev->partition < (1 << 6)) {
-            vdev->number = (3 << 8) | (vdev->disk << 6) | vdev->partition;
+            vdev->number = (HDA_MAJOR << 8) | (vdev->disk << 6) |
+                           vdev->partition;
         } else if ((vdev->disk == 2 || vdev->disk == 3) &&
                    vdev->partition < (1 << 6)) {
-            vdev->number = (22 << 8) | ((vdev->disk - 2) << 6) |
+            vdev->number = (HDC_MAJOR << 8) | ((vdev->disk - 2) << 6) |
                            vdev->partition;
         } else {
             goto invalid;
@@ -507,7 +614,8 @@ static void xen_block_set_vdev(Object *obj, Visitor *v, const char *name,
     case XEN_BLOCK_VDEV_TYPE_SD:
         if (vdev->disk < (1 << 4) && vdev->partition < (1 << 4)) {
-            vdev->number = (8 << 8) | (vdev->disk << 4) | vdev->partition;
+            vdev->number = (SDA_MAJOR << 8) | (vdev->disk << 4) |
+                           vdev->partition;
         } else {
             goto invalid;
         }
diff --git a/hw/char/trace-events b/hw/char/trace-events
index babf4d35ea..7a398c82a5 100644
--- a/hw/char/trace-events
+++ b/hw/char/trace-events
@@ -105,3 +105,11 @@ cadence_uart_baudrate(unsigned baudrate) "baudrate %u"
 # sh_serial.c
 sh_serial_read(char *id, unsigned size, uint64_t offs, uint64_t val) " %s size %d offs 0x%02" PRIx64 " -> 0x%02" PRIx64
 sh_serial_write(char *id, unsigned size, uint64_t offs, uint64_t val) "%s size %d offs 0x%02" PRIx64 " <- 0x%02" PRIx64
+
+# xen_console.c
+xen_console_connect(unsigned int idx, unsigned int ring_ref, unsigned int port, unsigned int limit) "idx %u ring_ref %u port %u limit %u"
+xen_console_disconnect(unsigned int idx) "idx %u"
+xen_console_unrealize(unsigned int idx) "idx %u"
+xen_console_realize(unsigned int idx, const char *chrdev) "idx %u chrdev %s"
+xen_console_device_create(unsigned int idx) "idx %u"
+xen_console_device_destroy(unsigned int idx) "idx %u"
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index 810dae3f44..5cbee2f184 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -20,15 +20,22 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/cutils.h"
 #include <sys/select.h>
 #include <termios.h>
 #include "qapi/error.h"
 #include "sysemu/sysemu.h"
 #include "chardev/char-fe.h"
-#include "hw/xen/xen-legacy-backend.h"
-
+#include "hw/xen/xen-backend.h"
+#include "hw/xen/xen-bus-helper.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
 #include "hw/xen/interface/io/console.h"
+#include "hw/xen/interface/io/xs_wire.h"
+#include "hw/xen/interface/grant_table.h"
+#include "hw/i386/kvm/xen_primary_console.h"
+#include "trace.h"
 
 struct buffer {
     uint8_t *data;
@@ -39,16 +46,22 @@ struct buffer {
 };
 
 struct XenConsole {
-    struct XenLegacyDevice xendev;  /* must be first */
+    struct XenDevice xendev;  /* must be first */
+    XenEventChannel *event_channel;
+    int dev;
     struct buffer buffer;
-    char console[XEN_BUFSIZE];
-    int ring_ref;
+    char *fe_path;
+    unsigned int ring_ref;
     void *sring;
     CharBackend chr;
    int backlog;
 };
+typedef struct XenConsole XenConsole;
+
+#define TYPE_XEN_CONSOLE_DEVICE "xen-console"
+OBJECT_DECLARE_SIMPLE_TYPE(XenConsole, XEN_CONSOLE_DEVICE)
 
-static void buffer_append(struct XenConsole *con)
+static bool buffer_append(XenConsole *con)
 {
     struct buffer *buffer = &con->buffer;
     XENCONS_RING_IDX cons, prod, size;
@@ -60,7 +73,7 @@ static void buffer_append(struct XenConsole *con)
 
     size = prod - cons;
     if ((size == 0) || (size > sizeof(intf->out)))
-        return;
+        return false;
 
     if ((buffer->capacity - buffer->size) < size) {
         buffer->capacity += (size + 1024);
@@ -73,7 +86,7 @@ static void buffer_append(struct XenConsole *con)
 
     xen_mb();
     intf->out_cons = cons;
-    xen_pv_send_notify(&con->xendev);
+    xen_device_notify_event_channel(XEN_DEVICE(con), con->event_channel, NULL);
 
     if (buffer->max_capacity &&
         buffer->size > buffer->max_capacity) {
@@ -89,6 +102,7 @@
         if (buffer->consumed > buffer->max_capacity - over)
             buffer->consumed = buffer->max_capacity - over;
     }
+    return true;
 }
 
 static void buffer_advance(struct buffer *buffer, size_t len)
@@ -100,7 +114,7 @@
     }
 }
 
-static int ring_free_bytes(struct XenConsole *con)
+static int ring_free_bytes(XenConsole *con)
 {
     struct xencons_interface *intf = con->sring;
     XENCONS_RING_IDX cons, prod, space;
@@ -118,13 +132,13 @@ static int ring_free_bytes(struct XenConsole *con)
 
 static int xencons_can_receive(void *opaque)
 {
-    struct XenConsole *con = opaque;
+    XenConsole *con = opaque;
     return ring_free_bytes(con);
 }
 
 static void xencons_receive(void *opaque, const uint8_t *buf, int len)
 {
-    struct XenConsole *con = opaque;
+    XenConsole *con = opaque;
     struct xencons_interface *intf = con->sring;
     XENCONS_RING_IDX prod;
     int i, max;
@@ -141,10 +155,10 @@ static void xencons_receive(void *opaque, const uint8_t *buf, int len)
     }
     xen_wmb();
     intf->in_prod = prod;
-    xen_pv_send_notify(&con->xendev);
+    xen_device_notify_event_channel(XEN_DEVICE(con), con->event_channel, NULL);
 }
 
-static void xencons_send(struct XenConsole *con)
+static bool xencons_send(XenConsole *con)
 {
     ssize_t len, size;
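Aside (not part of the patch): the console ring uses free-running 32-bit
producer/consumer indices. They are never reset, only masked down to a
buffer slot on access, so prod - cons is the number of unread bytes even
after the counters wrap. A minimal sketch; the mask macro here is mine
(the ring header provides MASK_XENCONS_IDX for this):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 1024u                     /* power of two */
    #define MASK_IDX(i) ((i) & (RING_SIZE - 1))

    int main(void)
    {
        uint32_t cons = 0xfffffffeu;            /* about to wrap */
        uint32_t prod = cons + 5;               /* wraps around to 3 */

        printf("unread bytes: %u\n", prod - cons);     /* 5 */
        printf("first slot:   %u\n", MASK_IDX(cons));  /* 1022 */
        return 0;
    }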
@@ -159,174 +173,472 @@
     if (len < 1) {
         if (!con->backlog) {
             con->backlog = 1;
-            xen_pv_printf(&con->xendev, 1,
-                          "backlog piling up, nobody listening?\n");
         }
     } else {
         buffer_advance(&con->buffer, len);
         if (con->backlog && len == size) {
             con->backlog = 0;
-            xen_pv_printf(&con->xendev, 1, "backlog is gone\n");
         }
     }
+    return len > 0;
 }
 
 /* -------------------------------------------------------------------- */
 
-static int store_con_info(struct XenConsole *con)
+static bool con_event(void *_xendev)
 {
-    Chardev *cs = qemu_chr_fe_get_driver(&con->chr);
-    char *pts = NULL;
-    char *dom_path;
-    g_autoptr(GString) path = NULL;
+    XenConsole *con = XEN_CONSOLE_DEVICE(_xendev);
+    bool done_something;
 
-    /* Only continue if we're talking to a pty. */
-    if (!CHARDEV_IS_PTY(cs)) {
-        return 0;
+    if (xen_device_backend_get_state(&con->xendev) != XenbusStateConnected) {
+        return false;
     }
-    pts = cs->filename + 4;
 
-    dom_path = qemu_xen_xs_get_domain_path(xenstore, xen_domid);
-    if (!dom_path) {
-        return 0;
+    done_something = buffer_append(con);
+
+    if (con->buffer.size - con->buffer.consumed) {
+        done_something |= xencons_send(con);
     }
+    return done_something;
+}
 
-    path = g_string_new(dom_path);
-    free(dom_path);
+/* -------------------------------------------------------------------- */
 
-    if (con->xendev.dev) {
-        g_string_append_printf(path, "/device/console/%d", con->xendev.dev);
-    } else {
-        g_string_append(path, "/console");
+static bool xen_console_connect(XenDevice *xendev, Error **errp)
+{
+    XenConsole *con = XEN_CONSOLE_DEVICE(xendev);
+    unsigned int port, limit;
+
+    if (xen_device_frontend_scanf(xendev, "ring-ref", "%u",
+                                  &con->ring_ref) != 1) {
+        error_setg(errp, "failed to read ring-ref");
+        return false;
+    }
+
+    if (xen_device_frontend_scanf(xendev, "port", "%u", &port) != 1) {
+        error_setg(errp, "failed to read remote port");
+        return false;
+    }
+
+    if (xen_device_frontend_scanf(xendev, "limit", "%u", &limit) == 1) {
+        con->buffer.max_capacity = limit;
+    }
+
+    con->event_channel = xen_device_bind_event_channel(xendev, port,
+                                                       con_event,
+                                                       con,
+                                                       errp);
+    if (!con->event_channel) {
+        return false;
     }
-    g_string_append(path, "/tty");
 
-    if (xenstore_write_str(con->console, path->str, pts)) {
-        fprintf(stderr, "xenstore_write_str for '%s' fail", path->str);
-        return -1;
+    switch (con->dev) {
+    case 0:
+        /*
+         * The primary console is special. For real Xen the ring-ref is
+         * actually a GFN which needs to be mapped as foreignmem.
+         */
+        if (xen_mode != XEN_EMULATE) {
+            xen_pfn_t mfn = (xen_pfn_t)con->ring_ref;
+            con->sring = qemu_xen_foreignmem_map(xendev->frontend_id, NULL,
+                                                 PROT_READ | PROT_WRITE,
+                                                 1, &mfn, NULL);
+            if (!con->sring) {
+                error_setg(errp, "failed to map console page");
+                return false;
+            }
+            break;
+        }
+
+        /*
+         * For Xen emulation, we still follow the convention of ring-ref
+         * holding the GFN, but we map the fixed GNTTAB_RESERVED_CONSOLE
+         * grant ref because there is no implementation of foreignmem
+         * operations for emulated mode. The emulation code which handles
+         * the guest-side page and event channel also needs to be informed
+         * of the backend event channel port, in order to reconnect to it
+         * after a soft reset.
+         */
+        xen_primary_console_set_be_port(
+            xen_event_channel_get_local_port(con->event_channel));
+        con->ring_ref = GNTTAB_RESERVED_CONSOLE;
+        /* fallthrough */
+    default:
+        con->sring = xen_device_map_grant_refs(xendev,
+                                               &con->ring_ref, 1,
+                                               PROT_READ | PROT_WRITE,
+                                               errp);
+        if (!con->sring) {
+            error_prepend(errp, "failed to map console grant ref: ");
+            return false;
+        }
+        break;
     }
-    return 0;
+
+    trace_xen_console_connect(con->dev, con->ring_ref, port,
+                              con->buffer.max_capacity);
+
+    qemu_chr_fe_set_handlers(&con->chr, xencons_can_receive,
+                             xencons_receive, NULL, NULL, con, NULL,
+                             true);
+    return true;
 }
 
-static int con_init(struct XenLegacyDevice *xendev)
+static void xen_console_disconnect(XenDevice *xendev, Error **errp)
 {
-    struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
-    char *type, *dom, label[32];
-    int ret = 0;
-    const char *output;
-
-    /* setup */
-    dom = qemu_xen_xs_get_domain_path(xenstore, con->xendev.dom);
-    if (!xendev->dev) {
-        snprintf(con->console, sizeof(con->console), "%s/console", dom);
-    } else {
-        snprintf(con->console, sizeof(con->console), "%s/device/console/%d", dom, xendev->dev);
+    XenConsole *con = XEN_CONSOLE_DEVICE(xendev);
+
+    trace_xen_console_disconnect(con->dev);
+
+    qemu_chr_fe_set_handlers(&con->chr, NULL, NULL, NULL, NULL,
+                             con, NULL, true);
+
+    if (con->event_channel) {
+        xen_device_unbind_event_channel(xendev, con->event_channel,
+                                        errp);
+        con->event_channel = NULL;
+
+        if (xen_mode == XEN_EMULATE && !con->dev) {
+            xen_primary_console_set_be_port(0);
+        }
     }
-    free(dom);
 
-    type = xenstore_read_str(con->console, "type");
-    if (!type || strcmp(type, "ioemu") != 0) {
-        xen_pv_printf(xendev, 1, "not for me (type=%s)\n", type);
-        ret = -1;
-        goto out;
+    if (con->sring) {
+        if (!con->dev && xen_mode != XEN_EMULATE) {
+            qemu_xen_foreignmem_unmap(con->sring, 1);
+        } else {
+            xen_device_unmap_grant_refs(xendev, con->sring,
+                                        &con->ring_ref, 1, errp);
+        }
+        con->sring = NULL;
     }
+}
+
+static void xen_console_frontend_changed(XenDevice *xendev,
+                                         enum xenbus_state frontend_state,
+                                         Error **errp)
+{
+    ERRP_GUARD();
+    enum xenbus_state backend_state = xen_device_backend_get_state(xendev);
+
+    switch (frontend_state) {
+    case XenbusStateInitialised:
+    case XenbusStateConnected:
+        if (backend_state == XenbusStateConnected) {
+            break;
+        }
 
-    output = xenstore_read_str(con->console, "output");
+        xen_console_disconnect(xendev, errp);
+        if (*errp) {
+            break;
+        }
 
-    /* no Xen override, use qemu output device */
-    if (output == NULL) {
-        if (con->xendev.dev) {
-            qemu_chr_fe_init(&con->chr, serial_hd(con->xendev.dev),
-                             &error_abort);
+        if (!xen_console_connect(xendev, errp)) {
+            xen_device_backend_set_state(xendev, XenbusStateClosing);
+            break;
         }
-    } else {
-        snprintf(label, sizeof(label), "xencons%d", con->xendev.dev);
-        qemu_chr_fe_init(&con->chr,
-                         /*
-                          * FIXME: sure we want to support implicit
-                          * muxed monitors here?
-                          */
-                         qemu_chr_new_mux_mon(label, output, NULL),
-                         &error_abort);
+
+        xen_device_backend_set_state(xendev, XenbusStateConnected);
+        break;
+
+    case XenbusStateClosing:
+        xen_device_backend_set_state(xendev, XenbusStateClosing);
+        break;
+
+    case XenbusStateClosed:
+    case XenbusStateUnknown:
+        xen_console_disconnect(xendev, errp);
+        if (*errp) {
+            break;
+        }
+
+        xen_device_backend_set_state(xendev, XenbusStateClosed);
+        break;
+
+    default:
+        break;
     }
+}
 
-    store_con_info(con);
+static char *xen_console_get_name(XenDevice *xendev, Error **errp)
+{
+    XenConsole *con = XEN_CONSOLE_DEVICE(xendev);
+
+    if (con->dev == -1) {
+        XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
+        char fe_path[XENSTORE_ABS_PATH_MAX + 1];
+        int idx = (xen_mode == XEN_EMULATE) ? 0 : 1;
+        char *value;
+
+        /* Theoretically we could go up to INT_MAX here but that's overkill */
+        while (idx < 100) {
+            if (!idx) {
+                snprintf(fe_path, sizeof(fe_path),
+                         "/local/domain/%u/console", xendev->frontend_id);
+            } else {
+                snprintf(fe_path, sizeof(fe_path),
+                         "/local/domain/%u/device/console/%u",
+                         xendev->frontend_id, idx);
+            }
+            value = qemu_xen_xs_read(xenbus->xsh, XBT_NULL, fe_path, NULL);
+            if (!value) {
+                if (errno == ENOENT) {
+                    con->dev = idx;
+                    goto found;
+                }
+                error_setg(errp, "cannot read %s: %s", fe_path,
+                           strerror(errno));
+                return NULL;
+            }
+            free(value);
+            idx++;
+        }
+        error_setg(errp, "cannot find device index for console device");
+        return NULL;
+    }
+ found:
+    return g_strdup_printf("%u", con->dev);
+}
 
-out:
-    g_free(type);
-    return ret;
+static void xen_console_unrealize(XenDevice *xendev)
+{
+    XenConsole *con = XEN_CONSOLE_DEVICE(xendev);
+
+    trace_xen_console_unrealize(con->dev);
+
+    /* Disconnect from the frontend in case this has not already happened */
+    xen_console_disconnect(xendev, NULL);
+
+    qemu_chr_fe_deinit(&con->chr, false);
 }
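Aside (not part of the patch): the index probe in xen_console_get_name()
walks exactly these XenStore frontend paths. Index 0 is the toolstack's
primary console at .../console; everything else lives under
.../device/console/N. Spelled out, with a domain id picked for
illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned domid = 7;
        char path[64];

        for (unsigned idx = 0; idx < 3; idx++) {
            if (!idx) {
                snprintf(path, sizeof(path),
                         "/local/domain/%u/console", domid);
            } else {
                snprintf(path, sizeof(path),
                         "/local/domain/%u/device/console/%u", domid, idx);
            }
            printf("%s\n", path);
        }
        return 0;
    }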
 
-static int con_initialise(struct XenLegacyDevice *xendev)
+static void xen_console_realize(XenDevice *xendev, Error **errp)
 {
-    struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
-    int limit;
-
-    if (xenstore_read_int(con->console, "ring-ref", &con->ring_ref) == -1)
-        return -1;
-    if (xenstore_read_int(con->console, "port", &con->xendev.remote_port) == -1)
-        return -1;
-    if (xenstore_read_int(con->console, "limit", &limit) == 0)
-        con->buffer.max_capacity = limit;
+    ERRP_GUARD();
+    XenConsole *con = XEN_CONSOLE_DEVICE(xendev);
+    Chardev *cs = qemu_chr_fe_get_driver(&con->chr);
+    unsigned int u;
 
-    if (!xendev->dev) {
-        xen_pfn_t mfn = con->ring_ref;
-        con->sring = qemu_xen_foreignmem_map(con->xendev.dom, NULL,
-                                             PROT_READ | PROT_WRITE,
-                                             1, &mfn, NULL);
+    if (!cs) {
+        error_setg(errp, "no backing character device");
+        return;
+    }
+
+    if (con->dev == -1) {
+        error_setg(errp, "no device index provided");
+        return;
+    }
+
+    /*
+     * The Xen primary console is special. The ring-ref is actually a GFN to
+     * be mapped directly as foreignmem (not a grant ref), and the guest port
+     * was allocated *for* the guest by the toolstack. The guest gets these
+     * through HVMOP_get_param and can use the console long before it's got
+     * XenStore up and running. We cannot create those for a true Xen guest,
+     * but we can for Xen emulation.
+     */
+    if (!con->dev) {
+        if (xen_mode == XEN_EMULATE) {
+            xen_primary_console_create();
+        } else if (xen_device_frontend_scanf(xendev, "ring-ref", "%u", &u)
+                   != 1 ||
+                   xen_device_frontend_scanf(xendev, "port", "%u", &u) != 1) {
+            error_setg(errp, "cannot create primary Xen console");
+            return;
+        }
+    }
+
+    trace_xen_console_realize(con->dev, object_get_typename(OBJECT(cs)));
+
+    if (CHARDEV_IS_PTY(cs)) {
+        /* Strip the leading 'pty:' */
+        xen_device_frontend_printf(xendev, "tty", "%s", cs->filename + 4);
+    }
+
+    /* No normal PV driver initialization for the primary console under Xen */
+    if (!con->dev && xen_mode != XEN_EMULATE) {
+        xen_console_connect(xendev, errp);
+    }
+}
+
+static char *console_frontend_path(struct qemu_xs_handle *xenstore,
+                                   unsigned int dom_id, unsigned int dev)
+{
+    if (!dev) {
+        return g_strdup_printf("/local/domain/%u/console", dom_id);
     } else {
-        con->sring = xen_be_map_grant_ref(xendev, con->ring_ref,
-                                          PROT_READ | PROT_WRITE);
+        return g_strdup_printf("/local/domain/%u/device/console/%u", dom_id,
+                               dev);
     }
-    if (!con->sring)
-        return -1;
+}
 
-    xen_be_bind_evtchn(&con->xendev);
-    qemu_chr_fe_set_handlers(&con->chr, xencons_can_receive,
-                             xencons_receive, NULL, NULL, con, NULL, true);
-
-    xen_pv_printf(xendev, 1,
-                  "ring mfn %d, remote port %d, local port %d, limit %zd\n",
-                  con->ring_ref,
-                  con->xendev.remote_port,
-                  con->xendev.local_port,
-                  con->buffer.max_capacity);
-    return 0;
+static char *xen_console_get_frontend_path(XenDevice *xendev, Error **errp)
+{
+    XenConsole *con = XEN_CONSOLE_DEVICE(xendev);
+    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
+    char *ret = console_frontend_path(xenbus->xsh, xendev->frontend_id,
+                                      con->dev);
+
+    if (!ret) {
+        error_setg(errp, "failed to create frontend path");
+    }
+    return ret;
 }
 
-static void con_disconnect(struct XenLegacyDevice *xendev)
+
+static Property xen_console_properties[] = {
+    DEFINE_PROP_CHR("chardev", XenConsole, chr),
+    DEFINE_PROP_INT32("idx", XenConsole, dev, -1),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xen_console_class_init(ObjectClass *class, void *data)
 {
-    struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
+    DeviceClass *dev_class = DEVICE_CLASS(class);
+    XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class);
+
+    xendev_class->backend = "console";
+    xendev_class->device = "console";
+    xendev_class->get_name = xen_console_get_name;
+    xendev_class->realize = xen_console_realize;
+    xendev_class->frontend_changed = xen_console_frontend_changed;
+    xendev_class->unrealize = xen_console_unrealize;
+    xendev_class->get_frontend_path = xen_console_get_frontend_path;
+
+    device_class_set_props(dev_class, xen_console_properties);
+}
 
-    qemu_chr_fe_deinit(&con->chr, false);
-    xen_pv_unbind_evtchn(&con->xendev);
+static const TypeInfo xen_console_type_info = {
+    .name = TYPE_XEN_CONSOLE_DEVICE,
+    .parent = TYPE_XEN_DEVICE,
+    .instance_size = sizeof(XenConsole),
+    .class_init = xen_console_class_init,
+};
 
-    if (con->sring) {
-        if (!xendev->dev) {
-            qemu_xen_foreignmem_unmap(con->sring, 1);
-        } else {
-            xen_be_unmap_grant_ref(xendev, con->sring, con->ring_ref);
+static void xen_console_register_types(void)
+{
+    type_register_static(&xen_console_type_info);
+}
+
+type_init(xen_console_register_types)
+
+/* Called to instantiate a XenConsole when the backend is detected. */
+static void xen_console_device_create(XenBackendInstance *backend,
+                                      QDict *opts, Error **errp)
+{
+    ERRP_GUARD();
+    XenBus *xenbus = xen_backend_get_bus(backend);
+    const char *name = xen_backend_get_name(backend);
+    unsigned long number;
+    char *fe = NULL, *type = NULL, *output = NULL;
+    char label[32];
+    XenDevice *xendev = NULL;
+    XenConsole *con;
+    Chardev *cd = NULL;
+    struct qemu_xs_handle *xsh = xenbus->xsh;
+
+    if (qemu_strtoul(name, NULL, 10, &number) || number > INT_MAX) {
+        error_setg(errp, "failed to parse name '%s'", name);
+        goto fail;
+    }
+
+    trace_xen_console_device_create(number);
+
+    fe = console_frontend_path(xsh, xen_domid, number);
+    if (fe == NULL) {
+        error_setg(errp, "failed to generate frontend path");
+        goto fail;
+    }
+
+    if (xs_node_scanf(xsh, XBT_NULL, fe, "type", errp, "%ms", &type) != 1) {
+        error_prepend(errp, "failed to read console device type: ");
+        goto fail;
+    }
+
+    if (strcmp(type, "ioemu")) {
+        error_setg(errp, "declining to handle console type '%s'",
+                   type);
+        goto fail;
+    }
+
+    xendev = XEN_DEVICE(qdev_new(TYPE_XEN_CONSOLE_DEVICE));
+    con = XEN_CONSOLE_DEVICE(xendev);
+
+    con->dev = number;
+
+    snprintf(label, sizeof(label), "xencons%lu", number);
+
+    if (xs_node_scanf(xsh, XBT_NULL, fe, "output", NULL, "%ms", &output) == 1) {
+        /*
+         * FIXME: sure we want to support implicit
+         * muxed monitors here?
+         */
+        cd = qemu_chr_new_mux_mon(label, output, NULL);
+        if (!cd) {
+            error_setg(errp, "console: no valid chardev found at '%s'",
+                       output);
+            goto fail;
+        }
+    } else if (number) {
+        cd = serial_hd(number);
+        if (!cd) {
+            error_setg(errp, "console: no serial device #%lu found",
+                       number);
+            goto fail;
+        }
+    } else {
+        /* No 'output' node on primary console: use null. */
+        cd = qemu_chr_new(label, "null", NULL);
+        if (!cd) {
+            error_setg(errp, "console: failed to create null device");
+            goto fail;
+        }
+    }
+
+    if (!qemu_chr_fe_init(&con->chr, cd, errp)) {
+        error_prepend(errp, "console: failed to initialize backing chardev: ");
+        goto fail;
+    }
+
+    if (qdev_realize_and_unref(DEVICE(xendev), BUS(xenbus), errp)) {
+        xen_backend_set_device(backend, xendev);
+        goto done;
+    }
+
+    error_prepend(errp, "realization of console device %lu failed: ",
+                  number);
+
+ fail:
+    if (xendev) {
+        object_unparent(OBJECT(xendev));
    }
-        con->sring = NULL;
-    }
+ done:
+    g_free(fe);
+    free(type);
+    free(output);
 }
 
-static void con_event(struct XenLegacyDevice *xendev)
+static void xen_console_device_destroy(XenBackendInstance *backend,
+                                       Error **errp)
 {
-    struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
+    ERRP_GUARD();
+    XenDevice *xendev = xen_backend_get_device(backend);
+    XenConsole *con = XEN_CONSOLE_DEVICE(xendev);
 
-    buffer_append(con);
-    if (con->buffer.size - con->buffer.consumed)
-        xencons_send(con);
-}
+    trace_xen_console_device_destroy(con->dev);
 
-/* -------------------------------------------------------------------- */
+    object_unparent(OBJECT(xendev));
+}
 
-struct XenDevOps xen_console_ops = {
-    .size       = sizeof(struct XenConsole),
-    .flags      = DEVOPS_FLAG_IGNORE_STATE|DEVOPS_FLAG_NEED_GNTDEV,
-    .init       = con_init,
-    .initialise = con_initialise,
-    .event      = con_event,
-    .disconnect = con_disconnect,
+static const XenBackendInfo xen_console_backend_info = {
+    .type = "console",
+    .create = xen_console_device_create,
+    .destroy = xen_console_device_destroy,
 };
+
+static void xen_console_register_backend(void)
+{
+    xen_backend_register(&xen_console_backend_info);
+}
+
+xen_backend_init(xen_console_register_backend);
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index bab8942c30..d4112b8919 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -136,10 +136,7 @@ static void cpu_common_reset_hold(Object *obj)
     cpu->crash_occurred = false;
     cpu->cflags_next_tb = -1;
 
-    if (tcg_enabled()) {
-        tcg_flush_jmp_cache(cpu);
-        tcg_flush_softmmu_tlb(cpu);
-    }
+    cpu_exec_reset_hold(cpu);
 }
 
 static bool cpu_common_has_work(CPUState *cs)
@@ -149,10 +146,18 @@ static bool cpu_common_has_work(CPUState *cs)
 
 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
 {
-    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));
-
-    assert(cpu_model && cc->class_by_name);
-    return cc->class_by_name(cpu_model);
+    ObjectClass *oc;
+    CPUClass *cc;
+
+    oc = object_class_by_name(typename);
+    cc = CPU_CLASS(oc);
+    assert(cc->class_by_name);
+    assert(cpu_model);
+    oc = cc->class_by_name(cpu_model);
+    if (oc == NULL || object_class_is_abstract(oc)) {
+        return NULL;
+    }
+    return oc;
 }
 
 static void cpu_common_parse_features(const char *typename, char *features,
diff --git a/hw/core/loader.c b/hw/core/loader.c
index b7bb44b7f7..3c79283777 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -1070,7 +1070,7 @@ static void *rom_set_mr(Rom *rom, Object *owner, const char *name, bool ro)
 
 ssize_t rom_add_file(const char *file, const char *fw_dir,
                      hwaddr addr, int32_t bootindex,
-                     bool option_rom, MemoryRegion *mr,
+                     bool has_option_rom, MemoryRegion *mr,
                      AddressSpace *as)
 {
     MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
@@ -1139,7 +1139,7 @@ ssize_t rom_add_file(const char *file, const char *fw_dir,
                  basename);
         snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name);
 
-        if ((!option_rom || mc->option_rom_has_mr) && mc->rom_file_has_mr) {
+        if ((!has_option_rom || mc->option_rom_has_mr) && mc->rom_file_has_mr) {
             data = rom_set_mr(rom, OBJECT(fw_cfg), devpath, true);
         } else {
             data = rom->data;
diff --git a/hw/cxl/cxl-cdat.c b/hw/cxl/cxl-cdat.c
index d246d6885b..639a2db3e1 100644
--- a/hw/cxl/cxl-cdat.c
+++ b/hw/cxl/cxl-cdat.c
@@ -60,7 +60,8 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp)
         return;
     }
 
-    cdat->built_buf_len = cdat->build_cdat_table(&cdat->built_buf, cdat->private);
+    cdat->built_buf_len = cdat->build_cdat_table(&cdat->built_buf,
+                                                 cdat->private);
 
     if (!cdat->built_buf_len) {
         /* Build later as not all data available yet */
diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c
index f3bbf0fd13..d0245cc55d 100644
--- a/hw/cxl/cxl-component-utils.c
+++ b/hw/cxl/cxl-component-utils.c
@@ -67,16 +67,24 @@ static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
     CXLComponentState *cxl_cstate = opaque;
     ComponentRegisters *cregs = &cxl_cstate->crb;
 
-    if (size == 8) {
+    switch (size) {
+    case 4:
+        if (cregs->special_ops && cregs->special_ops->read) {
+            return cregs->special_ops->read(cxl_cstate, offset, 4);
+        } else {
+            QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
+            return cregs->cache_mem_registers[offset / 4];
+        }
+    case 8:
         qemu_log_mask(LOG_UNIMP,
                       "CXL 8 byte cache mem registers not implemented\n");
         return 0;
-    }
-
-    if (cregs->special_ops && cregs->special_ops->read) {
-        return cregs->special_ops->read(cxl_cstate, offset, size);
-    } else {
-        return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
+    default:
+        /*
+         * In line with specification limitations on access sizes, this
+         * routine is not called with other sizes.
+         */
+        g_assert_not_reached();
+    }
 }
 
@@ -117,25 +125,37 @@ static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
     ComponentRegisters *cregs = &cxl_cstate->crb;
     uint32_t mask;
 
-    if (size == 8) {
-        qemu_log_mask(LOG_UNIMP,
-                      "CXL 8 byte cache mem registers not implemented\n");
+    switch (size) {
+    case 4: {
+        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_regs_write_mask) != 4);
+        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
+        mask = cregs->cache_mem_regs_write_mask[offset / 4];
+        value &= mask;
+        /* RO bits should remain constant. Done by reading existing value */
+        value |= ~mask & cregs->cache_mem_registers[offset / 4];
+        if (cregs->special_ops && cregs->special_ops->write) {
+            cregs->special_ops->write(cxl_cstate, offset, value, size);
+            return;
+        }
+
+        if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
+            offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
+            dumb_hdm_handler(cxl_cstate, offset, value);
+        } else {
+            cregs->cache_mem_registers[offset / 4] = value;
+        }
         return;
     }
-
-    mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)];
-    value &= mask;
-    /* RO bits should remain constant. Done by reading existing value */
-    value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
-    if (cregs->special_ops && cregs->special_ops->write) {
-        cregs->special_ops->write(cxl_cstate, offset, value, size);
+    case 8:
+        qemu_log_mask(LOG_UNIMP,
+                      "CXL 8 byte cache mem registers not implemented\n");
         return;
-    }
-
-    if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
-        offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
-        dumb_hdm_handler(cxl_cstate, offset, value);
-    } else {
-        cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value;
+    default:
+        /*
+         * In line with specification limitations on access sizes, this
+         * routine is not called with other sizes.
+         */
+        g_assert_not_reached();
     }
 }
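Aside (not part of the patch): the read-modify-write idiom at the heart
of cxl_cache_mem_write_reg() above, isolated. Bits set in the write mask
take the new value; everything else keeps the old register contents.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t masked_write(uint32_t old, uint32_t value, uint32_t wmask)
    {
        return (value & wmask) | (old & ~wmask);
    }

    int main(void)
    {
        /* only the low byte is writable */
        uint32_t r = masked_write(0xdeadbeef, 0x12345678, 0x000000ff);
        printf("0x%08x\n", (unsigned)r);    /* 0xdeadbe78 */
        return 0;
    }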
@@ -221,7 +241,8 @@ static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
     ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
     ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
     ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
-    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
+    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
+                     POISON_ON_ERR_CAP, 0);
     ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                      HDM_DECODER_ENABLE, 0);
     write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
@@ -244,15 +265,16 @@ static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
     }
 }
 
-void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
+void cxl_component_register_init_common(uint32_t *reg_state,
+                                        uint32_t *write_msk,
                                         enum reg_type type)
 {
     int caps = 0;
 
     /*
-     * In CXL 2.0 the capabilities required for each CXL component are such that,
-     * with the ordering chosen here, a single number can be used to define
-     * which capabilities should be provided.
+     * In CXL 2.0 the capabilities required for each CXL component are such
+     * that, with the ordering chosen here, a single number can be used to
+     * define which capabilities should be provided.
      */
     switch (type) {
     case CXL2_DOWNSTREAM_PORT:
@@ -283,7 +305,6 @@ void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk
     ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);
 
 #define init_cap_reg(reg, id, version)                                  \
-    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);               \
     do {                                                                \
         int which = R_CXL_##reg##_CAPABILITY_HEADER;                    \
         reg_state[which] = FIELD_DP32(reg_state[which],                 \
@@ -373,26 +394,35 @@ void cxl_component_create_dvsec(CXLComponentState *cxl,
     case NON_CXL_FUNCTION_MAP_DVSEC:
         break; /* Not yet implemented */
     case EXTENSIONS_PORT_DVSEC:
-        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
-        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, control)] = 0x0F;
+        wmask[offset + offsetof(CXLDVSECPortExt, control) + 1] = 0x40;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_base)] = 0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_limit)] = 0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base)] = 0xF0;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base) + 1] = 0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit)] = 0xF0;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit) + 1] = 0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base)] = 0xF0;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base) + 1] = 0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit)] = 0xF0;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit) + 1] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high)] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 1] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 2] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 3] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high)] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 1] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 2] =
+            0xFF;
+        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 3] =
+            0xFF;
         break;
     case GPF_PORT_DVSEC:
         wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
@@ -420,7 +450,7 @@ void cxl_component_create_dvsec(CXLComponentState *cxl,
         default: /* Registers are RO for other component types */
             break;
         }
-        /* There are rw1cs bits in the status register but never set currently */
+        /* There are rw1cs bits in the status register but never set */
         break;
     }
diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c
index bd68328032..61a3c4dc2e 100644
--- a/hw/cxl/cxl-device-utils.c
+++ b/hw/cxl/cxl-device-utils.c
@@ -32,10 +32,13 @@ static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
 {
     CXLDeviceState *cxl_dstate = opaque;
 
-    if (size == 4) {
-        return cxl_dstate->caps_reg_state32[offset / sizeof(*cxl_dstate->caps_reg_state32)];
-    } else {
-        return cxl_dstate->caps_reg_state64[offset / sizeof(*cxl_dstate->caps_reg_state64)];
+    switch (size) {
+    case 4:
+        return cxl_dstate->caps_reg_state32[offset / size];
+    case 8:
+        return cxl_dstate->caps_reg_state64[offset / size];
+    default:
+        g_assert_not_reached();
     }
 }
 
@@ -59,7 +62,17 @@ static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
 
 static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
 {
-    CXLDeviceState *cxl_dstate = opaque;
+    CXLDeviceState *cxl_dstate;
+    CXLCCI *cci = opaque;
+
+    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
+        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
+    } else if (object_dynamic_cast(OBJECT(cci->intf),
+                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
+        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
+    } else {
+        return 0;
+    }
 
     switch (size) {
     case 1:
@@ -69,6 +82,25 @@ static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
     case 4:
         return cxl_dstate->mbox_reg_state32[offset / size];
     case 8:
+        if (offset == A_CXL_DEV_BG_CMD_STS) {
+            uint64_t bg_status_reg;
+            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
+                                       cci->bg.opcode);
+            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
+                                       PERCENTAGE_COMP, cci->bg.complete_pct);
+            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
+                                       RET_CODE, cci->bg.ret_code);
+            /* endian? */
+            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
+        }
+        if (offset == A_CXL_DEV_MAILBOX_STS) {
+            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
+            if (cci->bg.complete_pct) {
+                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
+                                        0);
+                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
+            }
+        }
         return cxl_dstate->mbox_reg_state64[offset / size];
     default:
         g_assert_not_reached();
@@ -101,8 +133,7 @@ static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
     case A_CXL_DEV_MAILBOX_CMD:
         break;
     case A_CXL_DEV_BG_CMD_STS:
-        /* BG not supported */
-        /* fallthrough */
+        break;
     case A_CXL_DEV_MAILBOX_STS:
         /* Read only register, will get updated by the state machine */
         return;
@@ -120,7 +151,17 @@ static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                               unsigned size)
 {
-    CXLDeviceState *cxl_dstate = opaque;
+    CXLDeviceState *cxl_dstate;
+    CXLCCI *cci = opaque;
+
+    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
+        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
+    } else if (object_dynamic_cast(OBJECT(cci->intf),
+                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
+        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
+    } else {
+        return;
+    }
 
     if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
         memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
@@ -140,7 +181,49 @@ static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
 
     if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                          DOORBELL)) {
-        cxl_process_mailbox(cxl_dstate);
+        uint64_t command_reg =
+            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
+        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
+                                     COMMAND_SET);
+        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
+        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
+        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
+        /*
+         * Copy taken to avoid need for individual command handlers to care
+         * about aliasing.
+         */
+        g_autofree uint8_t *pl_in_copy = NULL;
+        size_t len_out = 0;
+        uint64_t status_reg;
+        bool bg_started = false;
+        int rc;
+
+        pl_in_copy = g_memdup2(pl, len_in);
+        if (len_in == 0 || pl_in_copy) {
+            /* Avoid stale data - including from earlier cmds */
+            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
+            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
+                                         &len_out, pl, &bg_started);
+        } else {
+            rc = CXL_MBOX_INTERNAL_ERROR;
+        }
+
+        /* Set bg and the return code */
+        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
+                                bg_started ? 1 : 0);
+        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
+        /* Set the return length */
+        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
+        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
+                                 COMMAND, cmd);
+        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
+                                 LENGTH, len_out);
+
+        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
+        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
+        /* Tell the host we're done */
+        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
+                         DOORBELL, 0);
     }
 }
 
@@ -220,7 +303,8 @@ static const MemoryRegionOps caps_ops = {
     },
 };
 
-void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
+void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
+                                    CXLCCI *cci)
 {
     /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
     memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
@@ -230,7 +314,7 @@ void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
                           "cap-array", CXL_CAPS_SIZE);
     memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                           "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
-    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate,
+    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                           "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
     memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                           cxl_dstate, "memory device caps",
@@ -273,16 +357,25 @@ static void device_reg_init_common(CXLDeviceState *cxl_dstate)
 
 static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
 {
-    /* 2048 payload size, with no interrupt or background support */
+    const uint8_t msi_n = 9;
+
+    /* 2048 payload size */
     ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                      PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
     cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
+    /* irq support */
+    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
+                     BG_INT_CAP, 1);
+    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
+                     MSI_N, msi_n);
+    cxl_dstate->mbox_msi_n = msi_n;
 }
 
 static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }
 
-void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
+void cxl_device_register_init_t3(CXLType3Dev *ct3d)
 {
+    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
     uint64_t *cap_h = cxl_dstate->caps_reg_state64;
     const int cap_count = 3;
 
@@ -300,7 +393,29 @@ void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
     cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
     memdev_reg_init_common(cxl_dstate);
 
-    cxl_initialize_mailbox(cxl_dstate);
+    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
+                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
+}
+
+void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
+{
+    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
+    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
+    const int cap_count = 3;
+
+    /* CXL Device Capabilities Array Register */
+    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
+    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
+    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);
+
+    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
+    device_reg_init_common(cxl_dstate);
+
+    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
+    mailbox_reg_init_common(cxl_dstate);
+
+    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
+    memdev_reg_init_common(cxl_dstate);
 }
 
 uint64_t
cxl_device_get_timestamp(CXLDeviceState *cxl_dstate) diff --git a/hw/cxl/cxl-events.c b/hw/cxl/cxl-events.c index 3ddd6369ad..bee6dfaf14 100644 --- a/hw/cxl/cxl-events.c +++ b/hw/cxl/cxl-events.c @@ -143,7 +143,7 @@ bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type, CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl, uint8_t log_type, int max_recs, - uint16_t *len) + size_t *len) { CXLEventLog *log; CXLEvent *entry; @@ -170,8 +170,10 @@ CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl, if (log->overflow_err_count) { pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW; pl->overflow_err_count = cpu_to_le16(log->overflow_err_count); - pl->first_overflow_timestamp = cpu_to_le64(log->first_overflow_timestamp); - pl->last_overflow_timestamp = cpu_to_le64(log->last_overflow_timestamp); + pl->first_overflow_timestamp = + cpu_to_le64(log->first_overflow_timestamp); + pl->last_overflow_timestamp = + cpu_to_le64(log->last_overflow_timestamp); } pl->record_count = cpu_to_le16(nr); @@ -180,7 +182,8 @@ CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl, return CXL_MBOX_SUCCESS; } -CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds, CXLClearEventPayload *pl) +CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds, + CXLClearEventPayload *pl) { CXLEventLog *log; uint8_t log_type; diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 434ccc5f6e..b365575097 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -8,13 +8,17 @@ */ #include "qemu/osdep.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" #include "hw/cxl/cxl.h" #include "hw/cxl/cxl_events.h" #include "hw/pci/pci.h" +#include "hw/pci-bridge/cxl_upstream_port.h" #include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/units.h" #include "qemu/uuid.h" +#include "sysemu/hostmem.h" #define CXL_CAPACITY_MULTIPLIER (256 * MiB) @@ -44,6 +48,9 @@ */ enum { + INFOSTAT = 0x00, + #define IS_IDENTIFY 0x1 + #define BACKGROUND_OPERATION_STATUS 0x2 EVENTS = 0x01, #define GET_RECORDS 0x0 #define CLEAR_RECORDS 0x1 @@ -63,27 +70,151 @@ enum { #define GET_PARTITION_INFO 0x0 #define GET_LSA 0x2 #define SET_LSA 0x3 + SANITIZE = 0x44, + #define OVERWRITE 0x0 + #define SECURE_ERASE 0x1 + PERSISTENT_MEM = 0x45, + #define GET_SECURITY_STATE 0x0 MEDIA_AND_POISON = 0x43, #define GET_POISON_LIST 0x0 #define INJECT_POISON 0x1 #define CLEAR_POISON 0x2 + PHYSICAL_SWITCH = 0x51, + #define IDENTIFY_SWITCH_DEVICE 0x0 + #define GET_PHYSICAL_PORT_STATE 0x1 + TUNNEL = 0x53, + #define MANAGEMENT_COMMAND 0x0 }; -struct cxl_cmd; -typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, uint16_t *len); -struct cxl_cmd { - const char *name; - opcode_handler handler; - ssize_t in; - uint16_t effect; /* Reported in CEL */ - uint8_t *payload; -}; +/* CCI Message Format CXL r3.0 Figure 7-19 */ +typedef struct CXLCCIMessage { + uint8_t category; +#define CXL_CCI_CAT_REQ 0 +#define CXL_CCI_CAT_RSP 1 + uint8_t tag; + uint8_t resv1; + uint8_t command; + uint8_t command_set; + uint8_t pl_length[3]; + uint16_t rc; + uint16_t vendor_specific; + uint8_t payload[]; +} QEMU_PACKED CXLCCIMessage; + +/* This command is only defined to an MLD FM Owned LD or an MHD */ +static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + PCIDevice *tunnel_target; + CXLCCI *target_cci; + struct { + uint8_t port_or_ld_id; + 
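/* only a value of 0 is accepted by the handler below */ +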
uint8_t target_type; + uint16_t size; + CXLCCIMessage ccimessage; + } QEMU_PACKED *in; + struct { + uint16_t resp_len; + uint8_t resv[2]; + CXLCCIMessage ccimessage; + } QEMU_PACKED *out; + size_t pl_length, length_out; + bool bg_started; + int rc; + + if (cmd->in < sizeof(*in)) { + return CXL_MBOX_INVALID_INPUT; + } + in = (void *)payload_in; + out = (void *)payload_out; + + /* Enough room for minimum sized message - no payload */ + if (in->size < sizeof(in->ccimessage)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + /* Length of input payload should be in->size + a wrapping tunnel header */ + if (in->size != len_in - offsetof(typeof(*out), ccimessage)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (in->ccimessage.category != CXL_CCI_CAT_REQ) { + return CXL_MBOX_INVALID_INPUT; + } + + if (in->target_type != 0) { + qemu_log_mask(LOG_UNIMP, + "Tunneled Command sent to non-existent FM-LD"); + return CXL_MBOX_INVALID_INPUT; + } + + /* + * The target of a tunnel unfortunately depends on the type of CCI reading + * the message. + * If in a switch, then it's the port number. + * If in an MLD it is the ld number. + * If in an MHD, the target type indicates where we are going. + */ + if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + if (in->port_or_ld_id != 0) { + /* Only pretending to have one for now! */ + return CXL_MBOX_INVALID_INPUT; + } + target_cci = &ct3d->ld0_cci; + } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { + CXLUpstreamPort *usp = CXL_USP(cci->d); + + tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus, + in->port_or_ld_id); + if (!tunnel_target) { + return CXL_MBOX_INVALID_INPUT; + } + tunnel_target = + pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0]; + if (!tunnel_target) { + return CXL_MBOX_INVALID_INPUT; + } + if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) { + CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target); + /* Tunneled VDMs always land on FM Owned LD */ + target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci; + } else { + return CXL_MBOX_INVALID_INPUT; + } + } else { + return CXL_MBOX_INVALID_INPUT; + } + + pl_length = in->ccimessage.pl_length[2] << 16 | + in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0]; + rc = cxl_process_cci_message(target_cci, + in->ccimessage.command_set, + in->ccimessage.command, + pl_length, in->ccimessage.payload, + &length_out, out->ccimessage.payload, + &bg_started); + /* Payload should be in place. Rest of the CCI header still needs filling. */
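+ /* pl_length is a 24-bit little-endian field; st24_le_p() stores the low three bytes of length_out */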
+ out->resp_len = length_out + sizeof(CXLCCIMessage); + st24_le_p(out->ccimessage.pl_length, length_out); + out->ccimessage.rc = rc; + out->ccimessage.category = CXL_CCI_CAT_RSP; + out->ccimessage.command = in->ccimessage.command; + out->ccimessage.command_set = in->ccimessage.command_set; + out->ccimessage.tag = in->ccimessage.tag; + *len_out = length_out + sizeof(*out); + + return CXL_MBOX_SUCCESS; +} -static CXLRetCode cmd_events_get_records(struct cxl_cmd *cmd, - CXLDeviceState *cxlds, - uint16_t *len) +static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd, + uint8_t *payload_in, size_t len_in, + uint8_t *payload_out, size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; CXLGetEventPayload *pl; uint8_t log_type; int max_recs; @@ -92,9 +223,9 @@ static CXLRetCode cmd_events_get_records(struct cxl_cmd *cmd, return CXL_MBOX_INVALID_INPUT; } - log_type = *((uint8_t *)cmd->payload); + log_type = payload_in[0]; - pl = (CXLGetEventPayload *)cmd->payload; + pl = (CXLGetEventPayload *)payload_out; memset(pl, 0, sizeof(*pl)); max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) / @@ -103,28 +234,36 @@ static CXLRetCode cmd_events_get_records(struct cxl_cmd *cmd, max_recs = 0xFFFF; } - return cxl_event_get_records(cxlds, pl, log_type, max_recs, len); + return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out); } -static CXLRetCode cmd_events_clear_records(struct cxl_cmd *cmd, - CXLDeviceState *cxlds, - uint16_t *len) +static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; CXLClearEventPayload *pl; - pl = (CXLClearEventPayload *)cmd->payload; - *len = 0; + pl = (CXLClearEventPayload *)payload_in; + *len_out = 0; return cxl_event_clear_records(cxlds, pl); } -static CXLRetCode cmd_events_get_interrupt_policy(struct cxl_cmd *cmd, - CXLDeviceState *cxlds, - uint16_t *len) +static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; CXLEventInterruptPolicy *policy; CXLEventLog *log; - policy = (CXLEventInterruptPolicy *)cmd->payload; + policy = (CXLEventInterruptPolicy *)payload_out; memset(policy, 0, sizeof(*policy)); log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; @@ -153,22 +292,26 @@ static CXLRetCode cmd_events_get_interrupt_policy(struct cxl_cmd *cmd, policy->dyn_cap_settings = CXL_INT_MSI_MSIX; } - *len = sizeof(*policy); + *len_out = sizeof(*policy); return CXL_MBOX_SUCCESS; } -static CXLRetCode cmd_events_set_interrupt_policy(struct cxl_cmd *cmd, - CXLDeviceState *cxlds, - uint16_t *len) +static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; CXLEventInterruptPolicy *policy; CXLEventLog *log; - if (*len < CXL_EVENT_INT_SETTING_MIN_LEN) { + if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) { return CXL_MBOX_INVALID_PAYLOAD_LENGTH; } - policy = (CXLEventInterruptPolicy *)cmd->payload; + policy = (CXLEventInterruptPolicy *)payload_in; log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) == @@ -187,7 +330,7 @@
static CXLRetCode cmd_events_set_interrupt_policy(struct cxl_cmd *cmd, CXL_INT_MSI_MSIX; /* DCD is optional */ - if (*len < sizeof(*policy)) { + if (len_in < sizeof(*policy)) { return CXL_MBOX_SUCCESS; } @@ -195,15 +338,286 @@ static CXLRetCode cmd_events_set_interrupt_policy(struct cxl_cmd *cmd, log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) == CXL_INT_MSI_MSIX; - *len = sizeof(*policy); + *len_out = 0; + return CXL_MBOX_SUCCESS; +} + +/* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */ +static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d); + struct { + uint16_t pcie_vid; + uint16_t pcie_did; + uint16_t pcie_subsys_vid; + uint16_t pcie_subsys_id; + uint64_t sn; + uint8_t max_message_size; + uint8_t component_type; + } QEMU_PACKED *is_identify; + QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18); + + is_identify = (void *)payload_out; + memset(is_identify, 0, sizeof(*is_identify)); + is_identify->pcie_vid = class->vendor_id; + is_identify->pcie_did = class->device_id; + if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { + is_identify->sn = CXL_USP(cci->d)->sn; + /* Subsystem info not defined for a USP */ + is_identify->pcie_subsys_vid = 0; + is_identify->pcie_subsys_id = 0; + is_identify->component_type = 0x0; /* Switch */ + } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { + PCIDevice *pci_dev = PCI_DEVICE(cci->d); + + is_identify->sn = CXL_TYPE3(cci->d)->sn; + /* + * We can't always use class->subsystem_vendor_id as + * it is not set if the defaults are used. + */ + is_identify->pcie_subsys_vid = + pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID); + is_identify->pcie_subsys_id = + pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID); + is_identify->component_type = 0x3; /* Type 3 */ + } + + /* TODO: Allow this to vary across different CCIs */ + is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */ + *len_out = sizeof(*is_identify); + return CXL_MBOX_SUCCESS; +} + +static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d, + void *private) +{ + uint8_t *bm = private; + if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) { + uint8_t port = PCIE_PORT(d)->port; + bm[port / 8] |= 1 << (port % 8); + } +} + +/* CXL r3 8.2.9.1.1 */ +static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + PCIEPort *usp = PCIE_PORT(cci->d); + PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; + int num_phys_ports = pcie_count_ds_ports(bus); + + struct cxl_fmapi_ident_switch_dev_resp_pl { + uint8_t ingress_port_id; + uint8_t rsvd; + uint8_t num_physical_ports; + uint8_t num_vcss; + uint8_t active_port_bitmask[0x20]; + uint8_t active_vcs_bitmask[0x20]; + uint16_t total_vppbs; + uint16_t bound_vppbs; + uint8_t num_hdm_decoders_per_usp; + } QEMU_PACKED *out; + QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49); + + out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out; + *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) { + .num_physical_ports = num_phys_ports + 1, /* 1 USP */ + .num_vcss = 1, /* Not yet supporting multiple VCS - potentially tricky */ + .active_vcs_bitmask[0] = 0x1, + .total_vppbs = num_phys_ports + 1, + .bound_vppbs = num_phys_ports + 1, + .num_hdm_decoders_per_usp = 4, + }; + + /* Depends on the CCI type */ + if (object_dynamic_cast(OBJECT(cci->intf),
TYPE_PCIE_PORT)) { + out->ingress_port_id = PCIE_PORT(cci->intf)->port; + } else { + /* MCTP? */ + out->ingress_port_id = 0; + } + + pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm, + out->active_port_bitmask); + out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8); + + *len_out = sizeof(*out); + + return CXL_MBOX_SUCCESS; +} + +/* CXL r3.0 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */ +static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + /* CXL r3.0 Table 7-18: Get Physical Port State Request Payload */ + struct cxl_fmapi_get_phys_port_state_req_pl { + uint8_t num_ports; + uint8_t ports[]; + } QEMU_PACKED *in; + + /* + * CXL r3.0 Table 7-20: Get Physical Port State Port Information Block + * Format + */ + struct cxl_fmapi_port_state_info_block { + uint8_t port_id; + uint8_t config_state; + uint8_t connected_device_cxl_version; + uint8_t rsv1; + uint8_t connected_device_type; + uint8_t port_cxl_version_bitmask; + uint8_t max_link_width; + uint8_t negotiated_link_width; + uint8_t supported_link_speeds_vector; + uint8_t max_link_speed; + uint8_t current_link_speed; + uint8_t ltssm_state; + uint8_t first_lane_num; + uint16_t link_state; + uint8_t supported_ld_count; + } QEMU_PACKED; + + /* CXL r3.0 Table 7-19: Get Physical Port State Response Payload */ + struct cxl_fmapi_get_phys_port_state_resp_pl { + uint8_t num_ports; + uint8_t rsv1[3]; + struct cxl_fmapi_port_state_info_block ports[]; + } QEMU_PACKED *out; + PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; + PCIEPort *usp = PCIE_PORT(cci->d); + size_t pl_size; + int i; + + in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in; + out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out; + + /* Check if what was requested can fit */ + if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) { + return CXL_MBOX_INVALID_INPUT; + } + + /* For success there should be a match for each requested */ + out->num_ports = in->num_ports; + + for (i = 0; i < in->num_ports; i++) { + struct cxl_fmapi_port_state_info_block *port; + /* First try to match on downstream port */ + PCIDevice *port_dev; + uint16_t lnkcap, lnkcap2, lnksta; + + port = &out->ports[i]; + + port_dev = pcie_find_port_by_pn(bus, in->ports[i]); + if (port_dev) { /* DSP */ + PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev)) + ->devices[0]; + port->config_state = 3; + if (ds_dev) { + if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) { + port->connected_device_type = 5; /* Assume MLD for now */ + } else { + port->connected_device_type = 1; + } + } else { + port->connected_device_type = 0; + } + port->supported_ld_count = 3; + } else if (usp->port == in->ports[i]) { /* USP */ + port_dev = PCI_DEVICE(usp); + port->config_state = 4; + port->connected_device_type = 0; + } else { + return CXL_MBOX_INVALID_INPUT; + } + + port->port_id = in->ports[i]; + /* Information on status of this port in lnksta, lnkcap */ + if (!port_dev->exp.exp_cap) { + return CXL_MBOX_INTERNAL_ERROR; + } + lnksta = port_dev->config_read(port_dev, + port_dev->exp.exp_cap + PCI_EXP_LNKSTA, + sizeof(lnksta)); + lnkcap = port_dev->config_read(port_dev, + port_dev->exp.exp_cap + PCI_EXP_LNKCAP, + sizeof(lnkcap)); + lnkcap2 = port_dev->config_read(port_dev, + port_dev->exp.exp_cap + PCI_EXP_LNKCAP2, + sizeof(lnkcap2)); + + port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + port->negotiated_link_width = 
(lnksta & PCI_EXP_LNKSTA_NLW) >> 4; + /* No definition for SLS field in linux/pci_regs.h */ + port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1; + port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS; + port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS; + /* TODO: Track down if we can get the rest of the info */ + port->ltssm_state = 0x7; + port->first_lane_num = 0; + port->link_state = 0; + port->port_cxl_version_bitmask = 0x2; + port->connected_device_cxl_version = 0x2; + } + + pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports; + *len_out = pl_size; + + return CXL_MBOX_SUCCESS; +} + +/* CXL r3.0 8.2.9.1.2 */ +static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint8_t status; + uint8_t rsvd; + uint16_t opcode; + uint16_t returncode; + uint16_t vendor_ext_status; + } QEMU_PACKED *bg_op_status; + QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8); + + bg_op_status = (void *)payload_out; + memset(bg_op_status, 0, sizeof(*bg_op_status)); + bg_op_status->status = cci->bg.complete_pct << 1; + if (cci->bg.runtime > 0) { + bg_op_status->status |= 1U << 0; + } + bg_op_status->opcode = cci->bg.opcode; + bg_op_status->returncode = cci->bg.ret_code; + *len_out = sizeof(*bg_op_status); + return CXL_MBOX_SUCCESS; } /* 8.2.9.2.1 */ -static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; struct { uint8_t slots_supported; uint8_t slot_info; @@ -221,7 +635,7 @@ static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd, return CXL_MBOX_INTERNAL_ERROR; } - fw_info = (void *)cmd->payload; + fw_info = (void *)payload_out; memset(fw_info, 0, sizeof(*fw_info)); fw_info->slots_supported = 2; @@ -229,34 +643,43 @@ static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd, fw_info->caps = 0; pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0"); - *len = sizeof(*fw_info); + *len_out = sizeof(*fw_info); return CXL_MBOX_SUCCESS; } /* 8.2.9.3.1 */ -static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; uint64_t final_time = cxl_device_get_timestamp(cxl_dstate); - stq_le_p(cmd->payload, final_time); - *len = 8; + stq_le_p(payload_out, final_time); + *len_out = 8; return CXL_MBOX_SUCCESS; } /* 8.2.9.3.2 */ -static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; + cxl_dstate->timestamp.set = true; cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)cmd->payload); + cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in); - *len = 0; + *len_out = 0; return CXL_MBOX_SUCCESS; } @@ -267,9 +690,12 @@ static const QemuUUID 
cel_uuid = { }; /* 8.2.9.4.1 */ -static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { struct { uint16_t entries; @@ -278,27 +704,32 @@ static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd, QemuUUID uuid; uint32_t size; } log_entries[1]; - } QEMU_PACKED *supported_logs = (void *)cmd->payload; + } QEMU_PACKED *supported_logs = (void *)payload_out; QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c); supported_logs->entries = 1; supported_logs->log_entries[0].uuid = cel_uuid; - supported_logs->log_entries[0].size = 4 * cxl_dstate->cel_size; + supported_logs->log_entries[0].size = 4 * cci->cel_size; - *len = sizeof(*supported_logs); + *len_out = sizeof(*supported_logs); return CXL_MBOX_SUCCESS; } /* 8.2.9.4.2 */ -static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { struct { QemuUUID uuid; uint32_t offset; uint32_t length; - } QEMU_PACKED QEMU_ALIGNED(16) *get_log = (void *)cmd->payload; + } QEMU_PACKED QEMU_ALIGNED(16) *get_log; + + get_log = (void *)payload_in; /* * 8.2.9.4.2 @@ -313,7 +744,7 @@ static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd, * the only possible failure would be if the mailbox itself isn't big * enough. */ - if (get_log->offset + get_log->length > cxl_dstate->payload_size) { + if (get_log->offset + get_log->length > cci->payload_max) { return CXL_MBOX_INVALID_INPUT; } @@ -322,18 +753,20 @@ static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd, } /* Store off everything to local variables so we can wipe out the payload */ - *len = get_log->length; + *len_out = get_log->length; - memmove(cmd->payload, cxl_dstate->cel_log + get_log->offset, - get_log->length); + memmove(payload_out, cci->cel_log + get_log->offset, get_log->length); return CXL_MBOX_SUCCESS; } /* 8.2.9.5.1.1 */ -static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { struct { char fw_revision[0x10]; @@ -352,43 +785,50 @@ static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd, uint8_t qos_telemetry_caps; } QEMU_PACKED *id; QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43); - - CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); + CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) { return CXL_MBOX_INTERNAL_ERROR; } - id = (void *)cmd->payload; + id = (void *)payload_out; memset(id, 0, sizeof(*id)); snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); - stq_le_p(&id->total_capacity, cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER); - stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); - stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&id->total_capacity, + cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER); + 
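/* capacities are reported in units of CXL_CAPACITY_MULTIPLIER (256 MiB) */ +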
stq_le_p(&id->persistent_capacity, + cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&id->volatile_capacity, + cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); /* 256 poison records */ st24_le_p(id->poison_list_max_mer, 256); /* No limit - so limited by main poison record limit */ stw_le_p(&id->inject_poison_limit, 0); - *len = sizeof(*id); + *len_out = sizeof(*id); return CXL_MBOX_SUCCESS; } -static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { + CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; struct { uint64_t active_vmem; uint64_t active_pmem; uint64_t next_vmem; uint64_t next_pmem; - } QEMU_PACKED *part_info = (void *)cmd->payload; + } QEMU_PACKED *part_info = (void *)payload_out; QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || @@ -396,82 +836,207 @@ static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd, return CXL_MBOX_INTERNAL_ERROR; } - stq_le_p(&part_info->active_vmem, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&part_info->active_vmem, + cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); /* * When both next_vmem and next_pmem are 0, there is no pending change to * partitioning. */ stq_le_p(&part_info->next_vmem, 0); - stq_le_p(&part_info->active_pmem, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); + stq_le_p(&part_info->active_pmem, + cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); stq_le_p(&part_info->next_pmem, 0); - *len = sizeof(*part_info); + *len_out = sizeof(*part_info); return CXL_MBOX_SUCCESS; } -static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { struct { uint32_t offset; uint32_t length; } QEMU_PACKED *get_lsa; - CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); uint32_t offset, length; - get_lsa = (void *)cmd->payload; + get_lsa = (void *)payload_in; offset = get_lsa->offset; length = get_lsa->length; if (offset + length > cvc->get_lsa_size(ct3d)) { - *len = 0; + *len_out = 0; return CXL_MBOX_INVALID_INPUT; } - *len = cvc->get_lsa(ct3d, get_lsa, length, offset); + *len_out = cvc->get_lsa(ct3d, payload_out, length, offset); return CXL_MBOX_SUCCESS; } -static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { struct set_lsa_pl { uint32_t offset; uint32_t rsvd; uint8_t data[]; } QEMU_PACKED; - struct set_lsa_pl *set_lsa_payload = (void *)cmd->payload; - CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + struct set_lsa_pl *set_lsa_payload = (void *)payload_in; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); const size_t hdr_len = offsetof(struct set_lsa_pl, data); - uint16_t plen = *len; - *len = 0; - if (!plen) { + *len_out = 0; + if (!len_in) { return CXL_MBOX_SUCCESS; } - if 
(set_lsa_payload->offset + plen > cvc->get_lsa_size(ct3d) + hdr_len) { + if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) { return CXL_MBOX_INVALID_INPUT; } - plen -= hdr_len; + len_in -= hdr_len; - cvc->set_lsa(ct3d, set_lsa_payload->data, plen, set_lsa_payload->offset); + cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset); return CXL_MBOX_SUCCESS; } +/* Perform the actual device zeroing */ +static void __do_sanitization(CXLType3Dev *ct3d) +{ + MemoryRegion *mr; + + if (ct3d->hostvmem) { + mr = host_memory_backend_get_memory(ct3d->hostvmem); + if (mr) { + void *hostmem = memory_region_get_ram_ptr(mr); + memset(hostmem, 0, memory_region_size(mr)); + } + } + + if (ct3d->hostpmem) { + mr = host_memory_backend_get_memory(ct3d->hostpmem); + if (mr) { + void *hostmem = memory_region_get_ram_ptr(mr); + memset(hostmem, 0, memory_region_size(mr)); + } + } + if (ct3d->lsa) { + mr = host_memory_backend_get_memory(ct3d->lsa); + if (mr) { + void *lsa = memory_region_get_ram_ptr(mr); + memset(lsa, 0, memory_region_size(mr)); + } + } +} + +/* + * CXL 3.0 spec section 8.2.9.8.5.1 - Sanitize. + * + * Once the Sanitize command has started successfully, the device shall be + * placed in the media disabled state. If the command fails or is interrupted + * by a reset or power failure, it shall remain in the media disabled state + * until a successful Sanitize command has been completed. During this state: + * + * 1. Memory writes to the device will have no effect, and all memory reads + * will return random values (no user data returned, even for locations that + * the failed Sanitize operation didn't sanitize yet). + * + * 2. Mailbox commands shall still be processed in the disabled state, except + * that commands that access Sanitized areas shall fail with the Media Disabled + * error code. + */ +static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + uint64_t total_mem; /* in MiB */ + int secs; + + total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20; + if (total_mem <= 512) { + secs = 4; + } else if (total_mem <= 1024) { + secs = 8; + } else if (total_mem <= 2 * 1024) { + secs = 15; + } else if (total_mem <= 4 * 1024) { + secs = 30; + } else if (total_mem <= 8 * 1024) { + secs = 60; + } else if (total_mem <= 16 * 1024) { + secs = 2 * 60; + } else if (total_mem <= 32 * 1024) { + secs = 4 * 60; + } else if (total_mem <= 64 * 1024) { + secs = 8 * 60; + } else if (total_mem <= 128 * 1024) { + secs = 15 * 60; + } else if (total_mem <= 256 * 1024) { + secs = 30 * 60; + } else if (total_mem <= 512 * 1024) { + secs = 60 * 60; + } else if (total_mem <= 1024 * 1024) { + secs = 120 * 60; + } else { + secs = 240 * 60; /* max 4 hrs */ + } + + /* Subsequent background commands are rejected as busy until this completes */ + cci->bg.runtime = secs * 1000UL; + *len_out = 0; + + cxl_dev_disable_media(&ct3d->cxl_dstate); + + if (secs > 2) { + /* the deferred wipe happens when the background timer completes */ + return CXL_MBOX_BG_STARTED; + } else { + __do_sanitization(ct3d); + cxl_dev_enable_media(&ct3d->cxl_dstate); + + return CXL_MBOX_SUCCESS; + } +} + +static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + uint32_t *state = (uint32_t *)payload_out; + + *state = 0; + *len_out = 4; + return CXL_MBOX_SUCCESS; +} /* * This is very inefficient, but good enough for now!
* Also the payload will always fit, so no need to handle the MORE flag and * make this stateful. We may want to allow longer poison lists to aid * testing that kernel functionality. */ -static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { struct get_poison_list_pl { uint64_t pa; @@ -491,9 +1056,9 @@ static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd, } QEMU_PACKED records[]; } QEMU_PACKED; - struct get_poison_list_pl *in = (void *)cmd->payload; - struct get_poison_list_out_pl *out = (void *)cmd->payload; - CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + struct get_poison_list_pl *in = (void *)payload_in; + struct get_poison_list_out_pl *out = (void *)payload_out; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); uint16_t record_count = 0, i = 0; uint64_t query_start, query_length; CXLPoisonList *poison_list = &ct3d->poison_list; @@ -541,21 +1106,24 @@ static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd, stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); } stw_le_p(&out->count, record_count); - *len = out_pl_len; + *len_out = out_pl_len; return CXL_MBOX_SUCCESS; } -static CXLRetCode cmd_media_inject_poison(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len_unused) +static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { - CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); CXLPoisonList *poison_list = &ct3d->poison_list; CXLPoison *ent; struct inject_poison_pl { uint64_t dpa; }; - struct inject_poison_pl *in = (void *)cmd->payload; + struct inject_poison_pl *in = (void *)payload_in; uint64_t dpa = ldq_le_p(&in->dpa); CXLPoison *p; @@ -580,15 +1148,20 @@ static CXLRetCode cmd_media_inject_poison(struct cxl_cmd *cmd, */ QLIST_INSERT_HEAD(poison_list, p, node); ct3d->poison_list_cnt++; + *len_out = 0; return CXL_MBOX_SUCCESS; } -static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len_unused) +static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) { - CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; CXLPoisonList *poison_list = &ct3d->poison_list; CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); struct clear_poison_pl { @@ -598,7 +1171,7 @@ static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd, CXLPoison *ent; uint64_t dpa; - struct clear_poison_pl *in = (void *)cmd->payload; + struct clear_poison_pl *in = (void *)payload_in; dpa = ldq_le_p(&in->dpa); if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) { @@ -659,6 +1232,7 @@ static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd, } /* Any fragments have been added, free original entry */ g_free(ent); + *len_out = 0; return CXL_MBOX_SUCCESS; } @@ -667,8 +1241,10 @@ static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd, #define IMMEDIATE_DATA_CHANGE (1 << 2) #define IMMEDIATE_POLICY_CHANGE (1 << 3) #define IMMEDIATE_LOG_CHANGE (1 << 4) +#define SECURITY_STATE_CHANGE (1 << 5) +#define 
BACKGROUND_OPERATION (1 << 6) -static struct cxl_cmd cxl_cmd_set[256][256] = { +static const struct cxl_cmd cxl_cmd_set[256][256] = { [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS", cmd_events_get_records, 1, 0 }, [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS", @@ -681,8 +1257,10 @@ static struct cxl_cmd cxl_cmd_set[256][256] = { [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO", cmd_firmware_update_get_info, 0, 0 }, [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, - [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, IMMEDIATE_POLICY_CHANGE }, - [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 0 }, + [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, + 8, IMMEDIATE_POLICY_CHANGE }, + [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, + 0, 0 }, [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE", cmd_identify_memory_device, 0, 0 }, @@ -691,6 +1269,10 @@ static struct cxl_cmd cxl_cmd_set[256][256] = { [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE }, + [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0, + IMMEDIATE_DATA_CHANGE | SECURITY_STATE_CHANGE | BACKGROUND_OPERATION }, + [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE", + cmd_get_security_state, 0, 0 }, [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", cmd_media_get_poison_list, 16, 0 }, [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON", @@ -699,63 +1281,231 @@ static struct cxl_cmd cxl_cmd_set[256][256] = { cmd_media_clear_poison, 72, 0 }, }; -void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +static const struct cxl_cmd cxl_cmd_set_sw[256][256] = { + [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, + [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS", + cmd_infostat_bg_op_sts, 0, 0 }, + [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, + [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 0, + IMMEDIATE_POLICY_CHANGE }, + [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, + 0 }, + [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, + [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE", + cmd_identify_switch_device, 0, 0 }, + [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS", + cmd_get_physical_port_state, ~0, 0 }, + [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", + cmd_tunnel_management_cmd, ~0, 0 }, +}; + +/* + * While the command is executing in the background, the device should + * update the percentage complete in the Background Command Status Register + * at least once per second. 
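+ * CXL_MBOX_BG_UPDATE_FREQ below is that update interval, in QEMU_CLOCK_VIRTUAL milliseconds.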
+ */ + +#define CXL_MBOX_BG_UPDATE_FREQ 1000UL + +int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, + size_t len_in, uint8_t *pl_in, size_t *len_out, + uint8_t *pl_out, bool *bg_started) { - uint16_t ret = CXL_MBOX_SUCCESS; - struct cxl_cmd *cxl_cmd; - uint64_t status_reg; + int ret; + const struct cxl_cmd *cxl_cmd; opcode_handler h; - uint64_t command_reg = cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD]; - uint8_t set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET); - uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND); - uint16_t len = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH); - cxl_cmd = &cxl_cmd_set[set][cmd]; + *len_out = 0; + cxl_cmd = &cci->cxl_cmd_set[set][cmd]; h = cxl_cmd->handler; - if (h) { - if (len == cxl_cmd->in || cxl_cmd->in == ~0) { - cxl_cmd->payload = cxl_dstate->mbox_reg_state + - A_CXL_DEV_CMD_PAYLOAD; - ret = (*h)(cxl_cmd, cxl_dstate, &len); - assert(len <= cxl_dstate->payload_size); - } else { - ret = CXL_MBOX_INVALID_PAYLOAD_LENGTH; - } - } else { + if (!h) { qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n", set << 8 | cmd); - ret = CXL_MBOX_UNSUPPORTED; + return CXL_MBOX_UNSUPPORTED; } - /* Set the return code */ - status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, ERRNO, ret); + if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } - /* Set the return length */ - command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET, 0); - command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND, 0); - command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH, len); + /* Only one bg command at a time */ + if ((cxl_cmd->effect & BACKGROUND_OPERATION) && + cci->bg.runtime > 0) { + return CXL_MBOX_BUSY; + } - cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg; - cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg; + /* forbid any selected commands while overwriting */ + if (sanitize_running(cci)) { + if (h == cmd_events_get_records || + h == cmd_ccls_get_partition_info || + h == cmd_ccls_set_lsa || + h == cmd_ccls_get_lsa || + h == cmd_logs_get_log || + h == cmd_media_get_poison_list || + h == cmd_media_inject_poison || + h == cmd_media_clear_poison || + h == cmd_sanitize_overwrite) { + return CXL_MBOX_MEDIA_DISABLED; + } + } - /* Tell the host we're done */ - ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL, - DOORBELL, 0); + ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci); + if ((cxl_cmd->effect & BACKGROUND_OPERATION) && + ret == CXL_MBOX_BG_STARTED) { + *bg_started = true; + } else { + *bg_started = false; + } + + /* Set bg and the return code */ + if (*bg_started) { + uint64_t now; + + cci->bg.opcode = (set << 8) | cmd; + + cci->bg.complete_pct = 0; + cci->bg.ret_code = 0; + + now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + cci->bg.starttime = now; + timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); + } + + return ret; } -void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) +static void bg_timercb(void *opaque) { + CXLCCI *cci = opaque; + uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + uint64_t total_time = cci->bg.starttime + cci->bg.runtime; + + assert(cci->bg.runtime > 0); + + if (now >= total_time) { /* we are done */ + uint16_t ret = CXL_MBOX_SUCCESS; + + cci->bg.complete_pct = 100; + cci->bg.ret_code = ret; + if (ret == CXL_MBOX_SUCCESS) { + switch (cci->bg.opcode) { + case 0x4400: /* sanitize */ + { + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + + 
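/* timer expired: perform the deferred wipe before re-enabling the media */ +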
__do_sanitization(ct3d); + cxl_dev_enable_media(&ct3d->cxl_dstate); + } + break; + case 0x4304: /* TODO: scan media */ + break; + default: + __builtin_unreachable(); + break; + } + } + + qemu_log("Background command %04xh finished: %s\n", + cci->bg.opcode, + ret == CXL_MBOX_SUCCESS ? "success" : "aborted"); + } else { + /* estimate only */ + cci->bg.complete_pct = 100 * now / total_time; + timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); + } + + if (cci->bg.complete_pct == 100) { + /* TODO: generalize to switch CCI */ + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; + PCIDevice *pdev = PCI_DEVICE(cci->d); + + cci->bg.starttime = 0; + /* registers are updated, allow new bg-capable cmds */ + cci->bg.runtime = 0; + + if (msix_enabled(pdev)) { + msix_notify(pdev, cxl_dstate->mbox_msi_n); + } else if (msi_enabled(pdev)) { + msi_notify(pdev, cxl_dstate->mbox_msi_n); + } + } +} + +void cxl_init_cci(CXLCCI *cci, size_t payload_max) +{ + cci->payload_max = payload_max; for (int set = 0; set < 256; set++) { for (int cmd = 0; cmd < 256; cmd++) { - if (cxl_cmd_set[set][cmd].handler) { - struct cxl_cmd *c = &cxl_cmd_set[set][cmd]; + if (cci->cxl_cmd_set[set][cmd].handler) { + const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd]; struct cel_log *log = - &cxl_dstate->cel_log[cxl_dstate->cel_size]; + &cci->cel_log[cci->cel_size]; log->opcode = (set << 8) | cmd; log->effect = c->effect; - cxl_dstate->cel_size++; + cci->cel_size++; } } } + cci->bg.complete_pct = 0; + cci->bg.starttime = 0; + cci->bg.runtime = 0; + cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + bg_timercb, cci); +} + +void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf, + DeviceState *d, size_t payload_max) +{ + cci->cxl_cmd_set = cxl_cmd_set_sw; + cci->d = d; + cci->intf = intf; + cxl_init_cci(cci, payload_max); +} + +void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max) +{ + cci->cxl_cmd_set = cxl_cmd_set; + cci->d = d; + + /* No separation for PCI MB as protocol handled in PCI device */ + cci->intf = d; + cxl_init_cci(cci, payload_max); +} + +static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = { + [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, + [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, + 0 }, + [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, +}; + +void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf, + size_t payload_max) +{ + cci->cxl_cmd_set = cxl_cmd_set_t3_ld; + cci->d = d; + cci->intf = intf; + cxl_init_cci(cci, payload_max); +} + +static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = { + [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0}, + [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, + 0 }, + [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, + [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, + [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", + cmd_tunnel_management_cmd, ~0, 0 }, +}; + +void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d, + DeviceState *intf, + size_t payload_max) +{ + cci->cxl_cmd_set = cxl_cmd_set_t3_fm_owned_ld_mctp; + cci->d = d; + cci->intf = intf; + cxl_init_cci(cci, payload_max); } diff --git a/hw/cxl/meson.build b/hw/cxl/meson.build index e261ff3881..ea0aebf6e3 100644 --- a/hw/cxl/meson.build +++ b/hw/cxl/meson.build @@ -6,6 +6,7 @@ system_ss.add(when: 'CONFIG_CXL', 'cxl-host.c', 
'cxl-cdat.c', 'cxl-events.c', + 'switch-mailbox-cci.c', ), if_false: files( 'cxl-host-stubs.c', diff --git a/hw/cxl/switch-mailbox-cci.c b/hw/cxl/switch-mailbox-cci.c new file mode 100644 index 0000000000..ba399c6240 --- /dev/null +++ b/hw/cxl/switch-mailbox-cci.c @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Emulation of a CXL Switch Mailbox CCI PCIe function. + * + * Copyright (c) 2023 Huawei Technologies. + * + * From www.computeexpresslink.org + * Compute Express Link (CXL) Specification revision 3.0 Version 1.0 + */ +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/pci-bridge/cxl_upstream_port.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "hw/cxl/cxl.h" + +static void cswmbcci_reset(DeviceState *dev) +{ + CSWMBCCIDev *cswmb = CXL_SWITCH_MAILBOX_CCI(dev); + cxl_device_register_init_swcci(cswmb); +} + +static void cswbcci_realize(PCIDevice *pci_dev, Error **errp) +{ + CSWMBCCIDev *cswmb = CXL_SWITCH_MAILBOX_CCI(pci_dev); + CXLComponentState *cxl_cstate = &cswmb->cxl_cstate; + CXLDeviceState *cxl_dstate = &cswmb->cxl_dstate; + CXLDVSECRegisterLocator *regloc_dvsec; + CXLUpstreamPort *usp; + + if (!cswmb->target) { + error_setg(errp, "Target not set"); + return; + } + usp = CXL_USP(cswmb->target); + + pcie_endpoint_cap_init(pci_dev, 0x80); + cxl_cstate->dvsec_offset = 0x100; + cxl_cstate->pdev = pci_dev; + cswmb->cci = &usp->swcci; + cxl_device_register_block_init(OBJECT(pci_dev), cxl_dstate, cswmb->cci); + pci_register_bar(pci_dev, 0, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, + &cxl_dstate->device_registers); + regloc_dvsec = &(CXLDVSECRegisterLocator) { + .rsvd = 0, + .reg0_base_lo = RBI_CXL_DEVICE_REG | 0, + .reg0_base_hi = 0, + }; + cxl_component_create_dvsec(cxl_cstate, CXL3_SWITCH_MAILBOX_CCI, + REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC, + REG_LOC_DVSEC_REVID, (uint8_t *)regloc_dvsec); + + cxl_initialize_mailbox_swcci(cswmb->cci, DEVICE(pci_dev), + DEVICE(cswmb->target), + CXL_MAILBOX_MAX_PAYLOAD_SIZE); +} + +static void cswmbcci_exit(PCIDevice *pci_dev) +{ + /* Nothing to do here yet */ +} + +static Property cxl_switch_cci_props[] = { + DEFINE_PROP_LINK("target", CSWMBCCIDev, + target, TYPE_CXL_USP, PCIDevice *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cswmbcci_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); + + pc->realize = cswbcci_realize; + pc->exit = cswmbcci_exit; + /* Serial bus, CXL Switch CCI */ + pc->class_id = 0x0c0b; + /* + * Huawei Technologies + * CXL Switch Mailbox CCI - DID assigned for emulation only. + * No real hardware will ever use this ID. 
+ */ + pc->vendor_id = 0x19e5; + pc->device_id = 0xa123; + pc->revision = 0; + dc->desc = "CXL Switch Mailbox CCI"; + dc->reset = cswmbcci_reset; + device_class_set_props(dc, cxl_switch_cci_props); +} + +static const TypeInfo cswmbcci_info = { + .name = TYPE_CXL_SWITCH_MAILBOX_CCI, + .parent = TYPE_PCI_DEVICE, + .class_init = cswmbcci_class_init, + .instance_size = sizeof(CSWMBCCIDev), + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void cxl_switch_mailbox_cci_register(void) +{ + type_register_static(&cswmbcci_info); +} +type_init(cxl_switch_mailbox_cci_register); diff --git a/hw/display/Kconfig b/hw/display/Kconfig index 7b3da68d1c..1aafe1923d 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -93,7 +93,7 @@ config VGA config QXL bool - depends on SPICE && PCI + depends on SPICE && PCI && PIXMAN select VGA config VIRTIO_GPU @@ -134,3 +134,8 @@ config MACFB bool select FRAMEBUFFER depends on NUBUS + +config XLNX_DISPLAYPORT + bool + # defaults to "N", enabled by specific boards + depends on PIXMAN diff --git a/hw/display/ati.c b/hw/display/ati.c index 9a87a5504a..569b8f6165 100644 --- a/hw/display/ati.c +++ b/hw/display/ati.c @@ -32,6 +32,12 @@ #define ATI_DEBUG_HW_CURSOR 0 +#ifdef CONFIG_PIXMAN +#define DEFAULT_X_PIXMAN 3 +#else +#define DEFAULT_X_PIXMAN 0 +#endif + static const struct { const char *name; uint16_t dev_id; @@ -946,6 +952,12 @@ static void ati_vga_realize(PCIDevice *dev, Error **errp) ATIVGAState *s = ATI_VGA(dev); VGACommonState *vga = &s->vga; +#ifndef CONFIG_PIXMAN + if (s->use_pixman != 0) { + warn_report("x-pixman != 0, not effective without PIXMAN"); + } +#endif + if (s->model) { int i; for (i = 0; i < ARRAY_SIZE(ati_model_aliases); i++) { @@ -1033,7 +1045,8 @@ static Property ati_vga_properties[] = { DEFINE_PROP_UINT16("x-device-id", ATIVGAState, dev_id, PCI_DEVICE_ID_ATI_RAGE128_PF), DEFINE_PROP_BOOL("guest_hwcursor", ATIVGAState, cursor_guest_mode, false), - DEFINE_PROP_UINT8("x-pixman", ATIVGAState, use_pixman, 3), + /* this is a debug option, prefer PROP_UINT over PROP_BIT for simplicity */ + DEFINE_PROP_UINT8("x-pixman", ATIVGAState, use_pixman, DEFAULT_X_PIXMAN), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c index 0e6b8e4367..309bb5ccb6 100644 --- a/hw/display/ati_2d.c +++ b/hw/display/ati_2d.c @@ -123,6 +123,7 @@ void ati_2d_blt(ATIVGAState *s) src_bits, dst_bits, src_stride, dst_stride, bpp, bpp, src_x, src_y, dst_x, dst_y, s->regs.dst_width, s->regs.dst_height); +#ifdef CONFIG_PIXMAN if ((s->use_pixman & BIT(1)) && s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT && s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM) { @@ -147,7 +148,9 @@ void ati_2d_blt(ATIVGAState *s) s->regs.dst_width, s->regs.dst_height); } g_free(tmp); - } else { + } else +#endif + { fallback = true; } if (fallback) { @@ -206,9 +209,12 @@ void ati_2d_blt(ATIVGAState *s) DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n", dst_bits, dst_stride, bpp, dst_x, dst_y, s->regs.dst_width, s->regs.dst_height, filler); +#ifdef CONFIG_PIXMAN if (!(s->use_pixman & BIT(0)) || !pixman_fill((uint32_t *)dst_bits, dst_stride, bpp, dst_x, dst_y, - s->regs.dst_width, s->regs.dst_height, filler)) { + s->regs.dst_width, s->regs.dst_height, filler)) +#endif + { /* fallback when pixman failed or we don't want to call it */ unsigned int x, y, i, bypp = bpp / 8; unsigned int dst_pitch = dst_stride * sizeof(uint32_t); diff --git a/hw/display/meson.build b/hw/display/meson.build index 2b64fd9f9d..344dfe3d8c 100644 --- a/hw/display/meson.build 
+++ b/hw/display/meson.build @@ -58,11 +58,11 @@ if config_all_devices.has_key('CONFIG_QXL') endif system_ss.add(when: 'CONFIG_DPCD', if_true: files('dpcd.c')) -system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dp.c')) +system_ss.add(when: 'CONFIG_XLNX_DISPLAYPORT', if_true: files('xlnx_dp.c')) system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) -system_ss.add(when: [pixman, 'CONFIG_ATI_VGA'], if_true: files('ati.c', 'ati_2d.c', 'ati_dbg.c')) +system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_dbg.c'), pixman]) if config_all_devices.has_key('CONFIG_VIRTIO_GPU') diff --git a/hw/display/sm501.c b/hw/display/sm501.c index 0eecd00701..5b4e4509e1 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -438,6 +438,12 @@ #define SM501_HWC_WIDTH 64 #define SM501_HWC_HEIGHT 64 +#ifdef CONFIG_PIXMAN +#define DEFAULT_X_PIXMAN 7 +#else +#define DEFAULT_X_PIXMAN 0 +#endif + /* SM501 local memory size taken from "linux/drivers/mfd/sm501.c" */ static const uint32_t sm501_mem_local_size[] = { [0] = 4 * MiB, @@ -730,7 +736,6 @@ static void sm501_2d_operation(SM501State *s) switch (cmd) { case 0: /* BitBlt */ { - static uint32_t tmp_buf[16384]; unsigned int src_x = (s->twoD_source >> 16) & 0x01FFF; unsigned int src_y = s->twoD_source & 0xFFFF; uint32_t src_base = s->twoD_source_base & 0x03FFFFFF; @@ -828,9 +833,11 @@ static void sm501_2d_operation(SM501State *s) de = db + (width + (height - 1) * dst_pitch) * bypp; overlap = (db < se && sb < de); } +#ifdef CONFIG_PIXMAN if (overlap && (s->use_pixman & BIT(2))) { /* pixman can't do reverse blit: copy via temporary */ int tmp_stride = DIV_ROUND_UP(width * bypp, sizeof(uint32_t)); + static uint32_t tmp_buf[16384]; uint32_t *tmp = tmp_buf; if (tmp_stride * sizeof(uint32_t) * height > sizeof(tmp_buf)) { @@ -860,7 +867,9 @@ static void sm501_2d_operation(SM501State *s) dst_pitch * bypp / sizeof(uint32_t), 8 * bypp, 8 * bypp, src_x, src_y, dst_x, dst_y, width, height); - } else { + } else +#endif + { fallback = true; } if (fallback) { @@ -894,20 +903,23 @@ static void sm501_2d_operation(SM501State *s) color = cpu_to_le16(color); } +#ifdef CONFIG_PIXMAN if (!(s->use_pixman & BIT(0)) || (width == 1 && height == 1) || !pixman_fill((uint32_t *)&s->local_mem[dst_base], dst_pitch * bypp / sizeof(uint32_t), 8 * bypp, - dst_x, dst_y, width, height, color)) { - /* fallback when pixman failed or we don't want to call it */ - uint8_t *d = s->local_mem + dst_base; - unsigned int x, y, i; - for (y = 0; y < height; y++) { - i = (dst_x + (dst_y + y) * dst_pitch) * bypp; - for (x = 0; x < width; x++, i += bypp) { - stn_he_p(&d[i], bypp, color); + dst_x, dst_y, width, height, color)) +#endif + { + /* fallback when pixman failed or we don't want to call it */ + uint8_t *d = s->local_mem + dst_base; + unsigned int x, y, i; + for (y = 0; y < height; y++) { + i = (dst_x + (dst_y + y) * dst_pitch) * bypp; + for (x = 0; x < width; x++, i += bypp) { + stn_he_p(&d[i], bypp, color); + } } } - } break; } default: @@ -1878,6 +1890,12 @@ static void sm501_reset(SM501State *s) static void sm501_init(SM501State *s, DeviceState *dev, uint32_t local_mem_bytes) { +#ifndef CONFIG_PIXMAN + if (s->use_pixman != 0) { + warn_report("x-pixman != 0, not effective without PIXMAN"); + } +#endif + s->local_mem_size_index = get_local_mem_size_index(local_mem_bytes); /* local memory */ @@ -2038,7 +2056,8 @@ static void sm501_realize_sysbus(DeviceState *dev, Error **errp) static Property sm501_sysbus_properties[] = { DEFINE_PROP_UINT32("vram-size", 
SM501SysBusState, vram_size, 0), - DEFINE_PROP_UINT8("x-pixman", SM501SysBusState, state.use_pixman, 7), + /* this is a debug option, prefer PROP_UINT over PROP_BIT for simplicity */ + DEFINE_PROP_UINT8("x-pixman", SM501SysBusState, state.use_pixman, DEFAULT_X_PIXMAN), DEFINE_PROP_END_OF_LIST(), }; @@ -2126,7 +2145,7 @@ static void sm501_realize_pci(PCIDevice *dev, Error **errp) static Property sm501_pci_properties[] = { DEFINE_PROP_UINT32("vram-size", SM501PCIState, vram_size, 64 * MiB), - DEFINE_PROP_UINT8("x-pixman", SM501PCIState, state.use_pixman, 7), + DEFINE_PROP_UINT8("x-pixman", SM501PCIState, state.use_pixman, DEFAULT_X_PIXMAN), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c index 1150521d9d..709c8a02a1 100644 --- a/hw/display/vhost-user-gpu.c +++ b/hw/display/vhost-user-gpu.c @@ -307,6 +307,7 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg) dpy_gl_update(con, m->x, m->y, m->width, m->height); break; } +#ifdef CONFIG_PIXMAN case VHOST_USER_GPU_UPDATE: { VhostUserGpuUpdate *m = &msg->payload.update; @@ -334,6 +335,7 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg) } break; } +#endif default: g_warning("unhandled message %d %d", msg->request, msg->size); } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 2707bceea8..b016d3bac8 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -16,6 +16,7 @@ #include "qemu/iov.h" #include "sysemu/cpus.h" #include "ui/console.h" +#include "ui/rect.h" #include "trace.h" #include "sysemu/dma.h" #include "sysemu/sysemu.h" @@ -503,7 +504,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_flush rf; struct virtio_gpu_scanout *scanout; - pixman_region16_t flush_region; + QemuRect flush_rect; bool within_bounds = false; bool update_submitted = false; int i; @@ -565,34 +566,25 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, return; } - pixman_region_init_rect(&flush_region, - rf.r.x, rf.r.y, rf.r.width, rf.r.height); + qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height); for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { - pixman_region16_t region, finalregion; - pixman_box16_t *extents; + QemuRect rect; if (!(res->scanout_bitmask & (1 << i))) { continue; } scanout = &g->parent_obj.scanout[i]; - pixman_region_init(&finalregion); - pixman_region_init_rect(&region, scanout->x, scanout->y, - scanout->width, scanout->height); + qemu_rect_init(&rect, scanout->x, scanout->y, + scanout->width, scanout->height); - pixman_region_intersect(&finalregion, &flush_region, &region); - pixman_region_translate(&finalregion, -scanout->x, -scanout->y); - extents = pixman_region_extents(&finalregion); /* work out the area we need to update for each console */ - dpy_gfx_update(g->parent_obj.scanout[i].con, - extents->x1, extents->y1, - extents->x2 - extents->x1, - extents->y2 - extents->y1); - - pixman_region_fini(&region); - pixman_region_fini(&finalregion); + if (qemu_rect_intersect(&flush_rect, &rect, &rect)) { + qemu_rect_translate(&rect, -scanout->x, -scanout->y); + dpy_gfx_update(g->parent_obj.scanout[i].con, + rect.x, rect.y, rect.width, rect.height); + } } - pixman_region_fini(&flush_region); } static void virtio_unref_resource(pixman_image_t *image, void *data) diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 67d4d1b5e0..a3222d3a96 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -87,7 +87,7 @@ static const MemoryRegionOps
hppa_pci_ignore_ops = { }, }; -static ISABus *hppa_isa_bus(void) +static ISABus *hppa_isa_bus(hwaddr addr) { ISABus *isa_bus; qemu_irq *isa_irqs; @@ -96,8 +96,7 @@ static ISABus *hppa_isa_bus(void) isa_region = g_new(MemoryRegion, 1); memory_region_init_io(isa_region, NULL, &hppa_pci_ignore_ops, NULL, "isa-io", 0x800); - memory_region_add_subregion(get_system_memory(), IDE_HPA, - isa_region); + memory_region_add_subregion(get_system_memory(), addr, isa_region); isa_bus = isa_bus_new(NULL, get_system_memory(), isa_region, &error_abort); @@ -163,13 +162,24 @@ static const MemoryRegionOps hppa_io_helper_ops = { }, }; +typedef uint64_t TranslateFn(void *opaque, uint64_t addr); -static uint64_t cpu_hppa_to_phys(void *opaque, uint64_t addr) +static uint64_t linux_kernel_virt_to_phys(void *opaque, uint64_t addr) { addr &= (0x10000000 - 1); return addr; } +static uint64_t translate_pa10(void *dummy, uint64_t addr) +{ + return (uint32_t)addr; +} + +static uint64_t translate_pa20(void *dummy, uint64_t addr) +{ + return hppa_abs_to_phys_pa2_w0(addr); +} + static HPPACPU *cpu[HPPA_MAX_CPUS]; static uint64_t firmware_entry; @@ -179,15 +189,17 @@ static void fw_cfg_boot_set(void *opaque, const char *boot_device, fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); } -static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus) +static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus, + hwaddr addr) { FWCfgState *fw_cfg; uint64_t val; const char qemu_version[] = QEMU_VERSION; MachineClass *mc = MACHINE_GET_CLASS(ms); + int btlb_entries = HPPA_BTLB_ENTRIES(&cpu[0]->env); int len; - fw_cfg = fw_cfg_init_mem(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4); + fw_cfg = fw_cfg_init_mem(addr, addr + 4); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, ms->smp.cpus); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, HPPA_MAX_CPUS); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, ms->ram_size); @@ -196,11 +208,11 @@ static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus) fw_cfg_add_file(fw_cfg, "/etc/firmware-min-version", g_memdup(&val, sizeof(val)), sizeof(val)); - val = cpu_to_le64(HPPA_TLB_ENTRIES - HPPA_BTLB_ENTRIES); + val = cpu_to_le64(HPPA_TLB_ENTRIES - btlb_entries); fw_cfg_add_file(fw_cfg, "/etc/cpu/tlb_entries", g_memdup(&val, sizeof(val)), sizeof(val)); - val = cpu_to_le64(HPPA_BTLB_ENTRIES); + val = cpu_to_le64(btlb_entries); fw_cfg_add_file(fw_cfg, "/etc/cpu/btlb_entries", g_memdup(&val, sizeof(val)), sizeof(val)); @@ -257,32 +269,45 @@ static DinoState *dino_init(MemoryRegion *addr_space) /* * Step 1: Create CPUs and Memory */ -static void machine_HP_common_init_cpus(MachineState *machine) +static TranslateFn *machine_HP_common_init_cpus(MachineState *machine) { MemoryRegion *addr_space = get_system_memory(); - MemoryRegion *cpu_region; - long i; unsigned int smp_cpus = machine->smp.cpus; - char *name; + TranslateFn *translate; + MemoryRegion *cpu_region; /* Create CPUs. */ - for (i = 0; i < smp_cpus; i++) { - name = g_strdup_printf("cpu%ld-io-eir", i); + for (unsigned int i = 0; i < smp_cpus; i++) { cpu[i] = HPPA_CPU(cpu_create(machine->cpu_type)); + } + + /* + * For now, treat address layout as if PSW_W is clear. + * TODO: create a proper hppa64 board model and load elf64 firmware. 
+ */ + if (hppa_is_pa20(&cpu[0]->env)) { + translate = translate_pa20; + } else { + translate = translate_pa10; + } + + for (unsigned int i = 0; i < smp_cpus; i++) { + g_autofree char *name = g_strdup_printf("cpu%u-io-eir", i); cpu_region = g_new(MemoryRegion, 1); memory_region_init_io(cpu_region, OBJECT(cpu[i]), &hppa_io_eir_ops, cpu[i], name, 4); - memory_region_add_subregion(addr_space, CPU_HPA + i * 0x1000, + memory_region_add_subregion(addr_space, + translate(NULL, CPU_HPA + i * 0x1000), cpu_region); - g_free(name); } /* RTC and DebugOutputPort on CPU #0 */ cpu_region = g_new(MemoryRegion, 1); memory_region_init_io(cpu_region, OBJECT(cpu[0]), &hppa_io_helper_ops, cpu[0], "cpu0-io-rtc", 2 * sizeof(uint64_t)); - memory_region_add_subregion(addr_space, CPU_HPA + 16, cpu_region); + memory_region_add_subregion(addr_space, translate(NULL, CPU_HPA + 16), + cpu_region); /* Main memory region. */ if (machine->ram_size > 3 * GiB) { @@ -290,12 +315,15 @@ static void machine_HP_common_init_cpus(MachineState *machine) exit(EXIT_FAILURE); } memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1); + + return translate; } /* * Last creation step: Add SCSI discs, NICs, graphics & load firmware */ -static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus) +static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, + TranslateFn *translate) { const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; @@ -323,13 +351,13 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus) dev = qdev_new("artist"); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); - sysbus_mmio_map(s, 0, LASI_GFX_HPA); - sysbus_mmio_map(s, 1, ARTIST_FB_ADDR); + sysbus_mmio_map(s, 0, translate(NULL, LASI_GFX_HPA)); + sysbus_mmio_map(s, 1, translate(NULL, ARTIST_FB_ADDR)); } /* Network setup. */ if (enable_lasi_lan()) { - lasi_82596_init(addr_space, LASI_LAN_HPA, + lasi_82596_init(addr_space, translate(NULL, LASI_LAN_HPA), qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA)); } @@ -373,7 +401,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus) qemu_register_powerdown_notifier(&hppa_system_powerdown_notifier); /* fw_cfg configuration interface */ - create_fw_cfg(machine, pci_bus); + create_fw_cfg(machine, pci_bus, translate(NULL, FW_CFG_IO_BASE)); /* Load firmware. Given that this is not "real" firmware, but one explicitly written for the emulation, we might as @@ -385,15 +413,10 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus) exit(1); } - size = load_elf(firmware_filename, NULL, NULL, NULL, + size = load_elf(firmware_filename, NULL, translate, NULL, &firmware_entry, &firmware_low, &firmware_high, NULL, true, EM_PARISC, 0, 0); - /* Unfortunately, load_elf sign-extends reading elf32. 
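/*
 * Editorial note illustrating the problem worked around here: load_elf()
 * sign-extends 32-bit addresses when reading ELF32 images, and passing
 * translate_pa10() as its translate_fn now truncates each address back
 * at load time, which is why the casts just below can be removed. A
 * minimal standalone demonstration of the arithmetic:
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
    int32_t raw = (int32_t)0xf0000000u;         /* p_vaddr as read from ELF32 */
    uint64_t extended = (uint64_t)(int64_t)raw; /* sign-extended by the loader */

    assert(extended == 0xfffffffff0000000ull);
    assert((uint32_t)extended == 0xf0000000u);  /* translate_pa10() effect */
    return 0;
}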
*/ - firmware_entry = (target_ureg)firmware_entry; - firmware_low = (target_ureg)firmware_low; - firmware_high = (target_ureg)firmware_high; - if (size < 0) { error_report("could not load firmware '%s'", firmware_filename); exit(1); @@ -401,7 +424,8 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus) qemu_log_mask(CPU_LOG_PAGE, "Firmware loaded at 0x%08" PRIx64 "-0x%08" PRIx64 ", entry at 0x%08" PRIx64 ".\n", firmware_low, firmware_high, firmware_entry); - if (firmware_low < FIRMWARE_START || firmware_high >= FIRMWARE_END) { + if (firmware_low < translate(NULL, FIRMWARE_START) || + firmware_high >= translate(NULL, FIRMWARE_END)) { error_report("Firmware overlaps with memory or IO space"); exit(1); } @@ -410,18 +434,16 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus) rom_region = g_new(MemoryRegion, 1); memory_region_init_ram(rom_region, NULL, "firmware", (FIRMWARE_END - FIRMWARE_START), &error_fatal); - memory_region_add_subregion(addr_space, FIRMWARE_START, rom_region); + memory_region_add_subregion(addr_space, + translate(NULL, FIRMWARE_START), rom_region); /* Load kernel */ if (kernel_filename) { - size = load_elf(kernel_filename, NULL, &cpu_hppa_to_phys, + size = load_elf(kernel_filename, NULL, linux_kernel_virt_to_phys, NULL, &kernel_entry, &kernel_low, &kernel_high, NULL, true, EM_PARISC, 0, 0); - /* Unfortunately, load_elf sign-extends reading elf32. */ - kernel_entry = (target_ureg) cpu_hppa_to_phys(NULL, kernel_entry); - kernel_low = (target_ureg)kernel_low; - kernel_high = (target_ureg)kernel_high; + kernel_entry = linux_kernel_virt_to_phys(NULL, kernel_entry); if (size < 0) { error_report("could not load kernel '%s'", kernel_filename); @@ -499,41 +521,48 @@ static void machine_HP_B160L_init(MachineState *machine) { DeviceState *dev, *dino_dev; MemoryRegion *addr_space = get_system_memory(); + TranslateFn *translate; ISABus *isa_bus; PCIBus *pci_bus; /* Create CPUs and RAM. */ - machine_HP_common_init_cpus(machine); + translate = machine_HP_common_init_cpus(machine); + + if (hppa_is_pa20(&cpu[0]->env)) { + error_report("The HP B160L workstation requires a 32-bit " + "CPU. Use '-machine C3700' instead."); + exit(1); + } /* Init Lasi chip */ lasi_dev = DEVICE(lasi_init()); - memory_region_add_subregion(addr_space, LASI_HPA, + memory_region_add_subregion(addr_space, translate(NULL, LASI_HPA), sysbus_mmio_get_region( SYS_BUS_DEVICE(lasi_dev), 0)); /* Init Dino (PCI host bus chip). */ dino_dev = DEVICE(dino_init(addr_space)); - memory_region_add_subregion(addr_space, DINO_HPA, + memory_region_add_subregion(addr_space, translate(NULL, DINO_HPA), sysbus_mmio_get_region( SYS_BUS_DEVICE(dino_dev), 0)); pci_bus = PCI_BUS(qdev_get_child_bus(dino_dev, "pci")); assert(pci_bus); /* Create ISA bus, needed for PS/2 kbd/mouse port emulation */ - isa_bus = hppa_isa_bus(); + isa_bus = hppa_isa_bus(translate(NULL, IDE_HPA)); assert(isa_bus); /* Serial ports: Lasi and Dino use a 7.272727 MHz clock. 
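/*
 * Editorial note on the clock comment above: serial_mm_init() takes a
 * baud base, conventionally the UART input clock already divided by 16,
 * hence the 7272727 / 16 in the calls below. A quick arithmetic check of
 * the rates a guest divisor would yield, assuming the usual 16550
 * divisor model (illustrative only):
 */
#include <stdio.h>

int main(void)
{
    const unsigned base = 7272727 / 16;   /* 454545 */
    const unsigned divisors[] = { 4, 24, 48 };

    for (int i = 0; i < 3; i++) {
        /* divisor 4 gives 113636, about 1.4% below the nominal 115200 */
        printf("divisor %2u -> %u baud\n", divisors[i], base / divisors[i]);
    }
    return 0;
}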
*/ - serial_mm_init(addr_space, LASI_UART_HPA + 0x800, 0, + serial_mm_init(addr_space, translate(NULL, LASI_UART_HPA + 0x800), 0, qdev_get_gpio_in(lasi_dev, LASI_IRQ_UART_HPA), 7272727 / 16, serial_hd(0), DEVICE_BIG_ENDIAN); - serial_mm_init(addr_space, DINO_UART_HPA + 0x800, 0, + serial_mm_init(addr_space, translate(NULL, DINO_UART_HPA + 0x800), 0, qdev_get_gpio_in(dino_dev, DINO_IRQ_RS232INT), 7272727 / 16, serial_hd(1), DEVICE_BIG_ENDIAN); /* Parallel port */ - parallel_mm_init(addr_space, LASI_LPT_HPA + 0x800, 0, + parallel_mm_init(addr_space, translate(NULL, LASI_LPT_HPA + 0x800), 0, qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA), parallel_hds[0]); @@ -542,15 +571,17 @@ static void machine_HP_B160L_init(MachineState *machine) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(lasi_dev, LASI_IRQ_PS2KBD_HPA)); - memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA, + memory_region_add_subregion(addr_space, + translate(NULL, LASI_PS2KBD_HPA), sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); - memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA + 0x100, + memory_region_add_subregion(addr_space, + translate(NULL, LASI_PS2KBD_HPA + 0x100), sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1)); /* Add SCSI discs, NICs, graphics & load firmware */ - machine_HP_common_init_tail(machine, pci_bus); + machine_HP_common_init_tail(machine, pci_bus, translate); } static AstroState *astro_init(void) @@ -572,21 +603,28 @@ static void machine_HP_C3700_init(MachineState *machine) AstroState *astro; DeviceState *astro_dev; MemoryRegion *addr_space = get_system_memory(); + TranslateFn *translate; /* Create CPUs and RAM. */ - machine_HP_common_init_cpus(machine); + translate = machine_HP_common_init_cpus(machine); + + if (!hppa_is_pa20(&cpu[0]->env)) { + error_report("The HP C3700 workstation requires a 64-bit CPU. " + "Use '-machine B160L' instead."); + exit(1); + } /* Init Astro and the Elroys (PCI host bus chips). */ astro = astro_init(); astro_dev = DEVICE(astro); - memory_region_add_subregion(addr_space, ASTRO_HPA, + memory_region_add_subregion(addr_space, translate(NULL, ASTRO_HPA), sysbus_mmio_get_region( SYS_BUS_DEVICE(astro_dev), 0)); pci_bus = PCI_BUS(qdev_get_child_bus(DEVICE(astro->elroy[0]), "pci")); assert(pci_bus); /* Add SCSI discs, NICs, graphics & load firmware */ - machine_HP_common_init_tail(machine, pci_bus); + machine_HP_common_init_tail(machine, pci_bus, translate); } static void hppa_machine_reset(MachineState *ms, ShutdownCause reason) @@ -608,10 +646,6 @@ static void hppa_machine_reset(MachineState *ms, ShutdownCause reason) cs->exception_index = -1; cs->halted = 0; - - /* clear any existing TLB and BTLB entries */ - memset(cpu[i]->env.tlb, 0, sizeof(cpu[i]->env.tlb)); - cpu[i]->env.tlb_last = HPPA_BTLB_ENTRIES; } /* already initialized by machine_hppa_init()?
*/ @@ -637,6 +671,11 @@ static void hppa_nmi(NMIState *n, int cpu_index, Error **errp) } } +static const char *HP_B160L_machine_valid_cpu_types[] = { + TYPE_HPPA_CPU, + NULL +}; + static void HP_B160L_machine_init_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -644,6 +683,7 @@ static void HP_B160L_machine_init_class_init(ObjectClass *oc, void *data) mc->desc = "HP B160L workstation"; mc->default_cpu_type = TYPE_HPPA_CPU; + mc->valid_cpu_types = HP_B160L_machine_valid_cpu_types; mc->init = machine_HP_B160L_init; mc->reset = hppa_machine_reset; mc->block_default_type = IF_SCSI; @@ -668,13 +708,19 @@ static const TypeInfo HP_B160L_machine_init_typeinfo = { }, }; +static const char *HP_C3700_machine_valid_cpu_types[] = { + TYPE_HPPA64_CPU, + NULL +}; + static void HP_C3700_machine_init_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); NMIClass *nc = NMI_CLASS(oc); mc->desc = "HP C3700 workstation"; - mc->default_cpu_type = TYPE_HPPA_CPU; + mc->default_cpu_type = TYPE_HPPA64_CPU; + mc->valid_cpu_types = HP_C3700_machine_valid_cpu_types; mc->init = machine_HP_C3700_init; mc->reset = hppa_machine_reset; mc->block_default_type = IF_SCSI; diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index cef51663d0..1b978e588f 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -102,7 +102,6 @@ void pmbus_send_string(PMBusDevice *pmdev, const char *data) } size_t len = strlen(data); - g_assert(len > 0); g_assert(len + pmdev->out_buf_len < SMBUS_DATA_MAX_LEN); pmdev->out_buf[len + pmdev->out_buf_len] = len; @@ -112,6 +111,35 @@ void pmbus_send_string(PMBusDevice *pmdev, const char *data) pmdev->out_buf_len += len + 1; } +uint8_t pmbus_receive_block(PMBusDevice *pmdev, uint8_t *dest, size_t len) +{ + /* dest may contain data from previous writes */ + memset(dest, 0, len); + + /* Exclude command code from return value */ + pmdev->in_buf++; + pmdev->in_buf_len--; + + /* The byte after the command code denotes the length */ + uint8_t sent_len = pmdev->in_buf[0]; + + if (sent_len != pmdev->in_buf_len - 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: length mismatch. 
Expected %d bytes, got %d bytes\n", + __func__, sent_len, pmdev->in_buf_len - 1); + } + + /* exclude length byte */ + pmdev->in_buf++; + pmdev->in_buf_len--; + + if (pmdev->in_buf_len < len) { + len = pmdev->in_buf_len; + } + memcpy(dest, pmdev->in_buf, len); + return len; +} + static uint64_t pmbus_receive_uint(PMBusDevice *pmdev) { @@ -472,6 +500,54 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) } break; + case PMBUS_FAN_CONFIG_1_2: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send8(pmdev, pmdev->pages[index].fan_config_1_2); + } else { + goto passthough; + } + break; + + case PMBUS_FAN_COMMAND_1: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].fan_command_1); + } else { + goto passthough; + } + break; + + case PMBUS_FAN_COMMAND_2: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].fan_command_2); + } else { + goto passthough; + } + break; + + case PMBUS_FAN_CONFIG_3_4: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send8(pmdev, pmdev->pages[index].fan_config_3_4); + } else { + goto passthough; + } + break; + + case PMBUS_FAN_COMMAND_3: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].fan_command_3); + } else { + goto passthough; + } + break; + + case PMBUS_FAN_COMMAND_4: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].fan_command_4); + } else { + goto passthough; + } + break; + case PMBUS_VOUT_OV_FAULT_LIMIT: /* R/W word */ if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { pmbus_send16(pmdev, pmdev->pages[index].vout_ov_fault_limit); @@ -782,6 +858,22 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) pmbus_send8(pmdev, pmdev->pages[index].status_mfr_specific); break; + case PMBUS_STATUS_FANS_1_2: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send8(pmdev, pmdev->pages[index].status_fans_1_2); + } else { + goto passthough; + } + break; + + case PMBUS_STATUS_FANS_3_4: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send8(pmdev, pmdev->pages[index].status_fans_3_4); + } else { + goto passthough; + } + break; + case PMBUS_READ_EIN: /* Read-Only block 5 bytes */ if (pmdev->pages[index].page_flags & PB_HAS_EIN) { pmbus_send(pmdev, pmdev->pages[index].read_ein, 5); @@ -814,6 +906,14 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) } break; + case PMBUS_READ_VCAP: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_VCAP) { + pmbus_send16(pmdev, pmdev->pages[index].read_vcap); + } else { + goto passthough; + } + break; + case PMBUS_READ_VOUT: /* Read-Only word */ if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { pmbus_send16(pmdev, pmdev->pages[index].read_vout); @@ -854,6 +954,54 @@ static uint8_t pmbus_receive_byte(SMBusDevice *smd) } break; + case PMBUS_READ_FAN_SPEED_1: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].read_fan_speed_1); + } else { + goto passthough; + } + break; + + case PMBUS_READ_FAN_SPEED_2: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].read_fan_speed_2); + } else { + goto passthough; + } + break; + + case PMBUS_READ_FAN_SPEED_3: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, 
pmdev->pages[index].read_fan_speed_3); + } else { + goto passthough; + } + break; + + case PMBUS_READ_FAN_SPEED_4: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].read_fan_speed_4); + } else { + goto passthough; + } + break; + + case PMBUS_READ_DUTY_CYCLE: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].read_duty_cycle); + } else { + goto passthough; + } + break; + + case PMBUS_READ_FREQUENCY: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmbus_send16(pmdev, pmdev->pages[index].read_frequency); + } else { + goto passthough; + } + break; + case PMBUS_READ_POUT: /* Read-Only word */ if (pmdev->pages[index].page_flags & PB_HAS_POUT) { pmbus_send16(pmdev, pmdev->pages[index].read_pout); @@ -1096,12 +1244,26 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) pmdev->in_buf = buf; pmdev->code = buf[0]; /* PMBus command code */ + + if (pmdev->code == PMBUS_CLEAR_FAULTS) { + pmbus_clear_faults(pmdev); + } + if (len == 1) { /* Single length writes are command codes only */ return 0; } if (pmdev->code == PMBUS_PAGE) { pmdev->page = pmbus_receive8(pmdev); + + if (pmdev->page > pmdev->num_pages - 1 && pmdev->page != PB_ALL_PAGES) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: page %u is out of range\n", + __func__, pmdev->page); + pmdev->page = 0; /* undefined behaviour - reset to page 0 */ + pmbus_cml_error(pmdev); + return PMBUS_ERR_BYTE; + } return 0; } @@ -1115,15 +1277,6 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) return 0; } - if (pmdev->page > pmdev->num_pages - 1) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: page %u is out of range\n", - __func__, pmdev->page); - pmdev->page = 0; /* undefined behaviour - reset to page 0 */ - pmbus_cml_error(pmdev); - return PMBUS_ERR_BYTE; - } - index = pmdev->page; switch (pmdev->code) { @@ -1277,6 +1430,54 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) } break; + case PMBUS_FAN_CONFIG_1_2: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].fan_config_1_2 = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_FAN_COMMAND_1: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].fan_command_1 = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_FAN_COMMAND_2: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].fan_command_2 = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_FAN_CONFIG_3_4: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].fan_config_3_4 = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_FAN_COMMAND_3: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].fan_command_3 = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_FAN_COMMAND_4: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].fan_command_4 = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + case PMBUS_VOUT_OV_FAULT_LIMIT: /* R/W word */ if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { pmdev->pages[index].vout_ov_fault_limit = pmbus_receive16(pmdev); @@ -1582,6 +1783,22 @@ static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) 
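/*
 * Editorial sketch (not from the patch): how a device-specific handler
 * might consume a block write with the pmbus_receive_block() helper
 * added above. The handler name and payload layout are hypothetical.
 */
static void hypothetical_handle_block_write(PMBusDevice *pmdev)
{
    uint8_t payload[16];
    uint8_t n = pmbus_receive_block(pmdev, payload, sizeof(payload));

    /*
     * The helper has already stripped the command code and the SMBus
     * length byte, clamped the copy to sizeof(payload), and logged a
     * guest error on a length mismatch; n is the byte count copied.
     */
    for (uint8_t i = 0; i < n; i++) {
        /* ... interpret payload[i] ... */
    }
}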
pmdev->pages[index].status_mfr_specific = pmbus_receive8(pmdev); break; + case PMBUS_STATUS_FANS_1_2: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].status_fans_1_2 = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_STATUS_FANS_3_4: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_FAN) { + pmdev->pages[index].status_fans_3_4 = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + case PMBUS_PAGE_PLUS_READ: /* Block Read-only */ case PMBUS_CAPABILITY: /* Read-Only byte */ case PMBUS_COEFFICIENTS: /* Read-only block 5 bytes */ diff --git a/hw/i386/kvm/meson.build b/hw/i386/kvm/meson.build index ab143d6474..a4a2e23c06 100644 --- a/hw/i386/kvm/meson.build +++ b/hw/i386/kvm/meson.build @@ -9,6 +9,7 @@ i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files( 'xen_evtchn.c', 'xen_gnttab.c', 'xen_xenstore.c', + 'xen_primary_console.c', 'xenstore_impl.c', )) diff --git a/hw/i386/kvm/trace-events b/hw/i386/kvm/trace-events index e4c82de6f3..67bf7f174e 100644 --- a/hw/i386/kvm/trace-events +++ b/hw/i386/kvm/trace-events @@ -18,3 +18,5 @@ xenstore_watch(const char *path, const char *token) "path %s token %s" xenstore_unwatch(const char *path, const char *token) "path %s token %s" xenstore_reset_watches(void) "" xenstore_watch_event(const char *path, const char *token) "path %s token %s" +xen_primary_console_create(void) "" +xen_primary_console_reset(int port) "port %u" diff --git a/hw/i386/kvm/xen-stubs.c b/hw/i386/kvm/xen-stubs.c index ae406e0b02..d03131e686 100644 --- a/hw/i386/kvm/xen-stubs.c +++ b/hw/i386/kvm/xen-stubs.c @@ -15,6 +15,7 @@ #include "qapi/qapi-commands-misc-target.h" #include "xen_evtchn.h" +#include "xen_primary_console.h" void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector, uint64_t addr, uint32_t data, bool is_masked) @@ -30,6 +31,13 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data) return false; } +void xen_primary_console_create(void) +{ +} + +void xen_primary_console_set_be_port(uint16_t port) +{ +} #ifdef TARGET_I386 EvtchnInfoList *qmp_xen_event_list(Error **errp) { diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c index b2b4be9983..02b8cbf8df 100644 --- a/hw/i386/kvm/xen_evtchn.c +++ b/hw/i386/kvm/xen_evtchn.c @@ -58,7 +58,15 @@ OBJECT_DECLARE_SIMPLE_TYPE(XenEvtchnState, XEN_EVTCHN) typedef struct XenEvtchnPort { uint32_t vcpu; /* Xen/ACPI vcpu_id */ uint16_t type; /* EVTCHNSTAT_xxxx */ - uint16_t type_val; /* pirq# / virq# / remote port according to type */ + union { + uint16_t val; /* raw value for serialization etc. */ + uint16_t pirq; + uint16_t virq; + struct { + uint16_t port:15; + uint16_t to_qemu:1; /* Only two targets; qemu or loopback */ + } interdomain; + } u; } XenEvtchnPort; /* 32-bit compatibility definitions, also used natively in 32-bit build */ @@ -106,14 +114,6 @@ struct xenevtchn_handle { }; /* - * For unbound/interdomain ports there are only two possible remote - * domains; self and QEMU. Use a single high bit in type_val for that, - * and the low bits for the remote port number (or 0 for unbound). - */ -#define PORT_INFO_TYPEVAL_REMOTE_QEMU 0x8000 -#define PORT_INFO_TYPEVAL_REMOTE_PORT_MASK 0x7FFF - -/* * These 'emuirq' values are used by Xen in the LM stream... and yes, I am * insane enough to think about guest-transparent live migration from actual * Xen to QEMU, and ensuring that we can convert/consume the stream.
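/*
 * Editorial note (not from the patch): why serializing u.val in the
 * vmstate below stays compatible with the old type_val encoding. On the
 * usual little-endian ABIs the bitfield places 'port' in bits 0..14 and
 * 'to_qemu' in bit 15, matching the removed 0x8000 / 0x7fff scheme.
 * Bitfield layout is compiler-defined, so this is an assumption rather
 * than a guarantee; a standalone check:
 */
#include <stdint.h>
#include <assert.h>

union demo_port {
    uint16_t val;
    struct {
        uint16_t port:15;
        uint16_t to_qemu:1;
    } interdomain;
};

int main(void)
{
    union demo_port u = { .val = 0 };

    u.interdomain.port = 0x1234;
    u.interdomain.to_qemu = 1;
    assert(u.val == (0x8000 | 0x1234)); /* old PORT_INFO_TYPEVAL format */
    return 0;
}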
@@ -210,16 +210,16 @@ static int xen_evtchn_post_load(void *opaque, int version_id) XenEvtchnPort *p = &s->port_table[i]; if (p->type == EVTCHNSTAT_pirq) { - assert(p->type_val); - assert(p->type_val < s->nr_pirqs); + assert(p->u.pirq); + assert(p->u.pirq < s->nr_pirqs); /* * Set the gsi to IRQ_UNBOUND; it may be changed to an actual * GSI# below, or to IRQ_MSI_EMU when the MSI table snooping * catches up with it. */ - s->pirq[p->type_val].gsi = IRQ_UNBOUND; - s->pirq[p->type_val].port = i; + s->pirq[p->u.pirq].gsi = IRQ_UNBOUND; + s->pirq[p->u.pirq].port = i; } } /* Rebuild s->pirq[].gsi mapping */ @@ -243,7 +243,7 @@ static const VMStateDescription xen_evtchn_port_vmstate = { .fields = (VMStateField[]) { VMSTATE_UINT32(vcpu, XenEvtchnPort), VMSTATE_UINT16(type, XenEvtchnPort), - VMSTATE_UINT16(type_val, XenEvtchnPort), + VMSTATE_UINT16(u.val, XenEvtchnPort), VMSTATE_END_OF_LIST() } }; @@ -605,14 +605,13 @@ static void unbind_backend_ports(XenEvtchnState *s) for (i = 1; i < s->nr_ports; i++) { p = &s->port_table[i]; - if (p->type == EVTCHNSTAT_interdomain && - (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU)) { - evtchn_port_t be_port = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK; + if (p->type == EVTCHNSTAT_interdomain && p->u.interdomain.to_qemu) { + evtchn_port_t be_port = p->u.interdomain.port; if (s->be_handles[be_port]) { /* This part will be overwritten on the load anyway. */ p->type = EVTCHNSTAT_unbound; - p->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU; + p->u.interdomain.port = 0; /* Leave the backend port open and unbound too. */ if (kvm_xen_has_cap(EVTCHN_SEND)) { @@ -650,30 +649,22 @@ int xen_evtchn_status_op(struct evtchn_status *status) switch (p->type) { case EVTCHNSTAT_unbound: - if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) { - status->u.unbound.dom = DOMID_QEMU; - } else { - status->u.unbound.dom = xen_domid; - } + status->u.unbound.dom = p->u.interdomain.to_qemu ? DOMID_QEMU + : xen_domid; break; case EVTCHNSTAT_interdomain: - if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) { - status->u.interdomain.dom = DOMID_QEMU; - } else { - status->u.interdomain.dom = xen_domid; - } - - status->u.interdomain.port = p->type_val & - PORT_INFO_TYPEVAL_REMOTE_PORT_MASK; + status->u.interdomain.dom = p->u.interdomain.to_qemu ? DOMID_QEMU + : xen_domid; + status->u.interdomain.port = p->u.interdomain.port; break; case EVTCHNSTAT_pirq: - status->u.pirq = p->type_val; + status->u.pirq = p->u.pirq; break; case EVTCHNSTAT_virq: - status->u.virq = p->type_val; + status->u.virq = p->u.virq; break; } @@ -989,7 +980,7 @@ static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port) static void free_port(XenEvtchnState *s, evtchn_port_t port) { s->port_table[port].type = EVTCHNSTAT_closed; - s->port_table[port].type_val = 0; + s->port_table[port].u.val = 0; s->port_table[port].vcpu = 0; if (s->nr_ports == port + 1) { @@ -1012,7 +1003,7 @@ static int allocate_port(XenEvtchnState *s, uint32_t vcpu, uint16_t type, if (s->port_table[p].type == EVTCHNSTAT_closed) { s->port_table[p].vcpu = vcpu; s->port_table[p].type = type; - s->port_table[p].type_val = val; + s->port_table[p].u.val = val; *port = p; @@ -1053,15 +1044,15 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port, return -ENOENT; case EVTCHNSTAT_pirq: - s->pirq[p->type_val].port = 0; - if (s->pirq[p->type_val].is_translated) { + s->pirq[p->u.pirq].port = 0; + if (s->pirq[p->u.pirq].is_translated) { *flush_kvm_routes = true; } break; case EVTCHNSTAT_virq: - kvm_xen_set_vcpu_virq(virq_is_global(p->type_val) ? 
0 : p->vcpu, - p->type_val, 0); + kvm_xen_set_vcpu_virq(virq_is_global(p->u.virq) ? 0 : p->vcpu, + p->u.virq, 0); break; case EVTCHNSTAT_ipi: @@ -1071,8 +1062,8 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port, break; case EVTCHNSTAT_interdomain: - if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) { - uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU; + if (p->u.interdomain.to_qemu) { + uint16_t be_port = p->u.interdomain.port; struct xenevtchn_handle *xc = s->be_handles[be_port]; if (xc) { if (kvm_xen_has_cap(EVTCHN_SEND)) { @@ -1082,14 +1073,15 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port, } } else { /* Loopback interdomain */ - XenEvtchnPort *rp = &s->port_table[p->type_val]; - if (!valid_port(p->type_val) || rp->type_val != port || + XenEvtchnPort *rp = &s->port_table[p->u.interdomain.port]; + if (!valid_port(p->u.interdomain.port) || + rp->u.interdomain.port != port || rp->type != EVTCHNSTAT_interdomain) { error_report("Inconsistent state for interdomain unbind"); } else { /* Set the other end back to unbound */ rp->type = EVTCHNSTAT_unbound; - rp->type_val = 0; + rp->u.interdomain.port = 0; } } break; @@ -1214,7 +1206,7 @@ int xen_evtchn_bind_vcpu_op(struct evtchn_bind_vcpu *vcpu) if (p->type == EVTCHNSTAT_interdomain || p->type == EVTCHNSTAT_unbound || p->type == EVTCHNSTAT_pirq || - (p->type == EVTCHNSTAT_virq && virq_is_global(p->type_val))) { + (p->type == EVTCHNSTAT_virq && virq_is_global(p->u.virq))) { /* * unmask_port() with do_unmask==false will just raise the event * on the new vCPU if the port was already pending. @@ -1359,19 +1351,15 @@ int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi) int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain) { XenEvtchnState *s = xen_evtchn_singleton; - uint16_t type_val; int ret; if (!s) { return -ENOTSUP; } - if (interdomain->remote_dom == DOMID_QEMU) { - type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU; - } else if (interdomain->remote_dom == DOMID_SELF || - interdomain->remote_dom == xen_domid) { - type_val = 0; - } else { + if (interdomain->remote_dom != DOMID_QEMU && + interdomain->remote_dom != DOMID_SELF && + interdomain->remote_dom != xen_domid) { return -ESRCH; } @@ -1382,8 +1370,8 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain) qemu_mutex_lock(&s->port_lock); /* The newly allocated port starts out as unbound */ - ret = allocate_port(s, 0, EVTCHNSTAT_unbound, type_val, - &interdomain->local_port); + ret = allocate_port(s, 0, EVTCHNSTAT_unbound, 0, &interdomain->local_port); + if (ret) { goto out; } @@ -1408,7 +1396,8 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain) assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd); } lp->type = EVTCHNSTAT_interdomain; - lp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU | interdomain->remote_port; + lp->u.interdomain.to_qemu = 1; + lp->u.interdomain.port = interdomain->remote_port; ret = 0; } else { /* Loopback */ @@ -1416,19 +1405,18 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain) XenEvtchnPort *lp = &s->port_table[interdomain->local_port]; /* - * The 'remote' port for loopback must be an unbound port allocated for - * communication with the local domain (as indicated by rp->type_val - * being zero, not PORT_INFO_TYPEVAL_REMOTE_QEMU), and must *not* be - * the port that was just allocated for the local end. 
+ * The 'remote' port for loopback must be an unbound port allocated + * for communication with the local domain, and must *not* be the + * port that was just allocated for the local end. */ if (interdomain->local_port != interdomain->remote_port && - rp->type == EVTCHNSTAT_unbound && rp->type_val == 0) { + rp->type == EVTCHNSTAT_unbound && !rp->u.interdomain.to_qemu) { rp->type = EVTCHNSTAT_interdomain; - rp->type_val = interdomain->local_port; + rp->u.interdomain.port = interdomain->local_port; lp->type = EVTCHNSTAT_interdomain; - lp->type_val = interdomain->remote_port; + lp->u.interdomain.port = interdomain->remote_port; } else { ret = -EINVAL; } @@ -1447,7 +1435,6 @@ int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain) int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc) { XenEvtchnState *s = xen_evtchn_singleton; - uint16_t type_val; int ret; if (!s) { @@ -1458,18 +1445,20 @@ int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc) return -ESRCH; } - if (alloc->remote_dom == DOMID_QEMU) { - type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU; - } else if (alloc->remote_dom == DOMID_SELF || - alloc->remote_dom == xen_domid) { - type_val = 0; - } else { + if (alloc->remote_dom != DOMID_QEMU && + alloc->remote_dom != DOMID_SELF && + alloc->remote_dom != xen_domid) { return -EPERM; } qemu_mutex_lock(&s->port_lock); - ret = allocate_port(s, 0, EVTCHNSTAT_unbound, type_val, &alloc->port); + ret = allocate_port(s, 0, EVTCHNSTAT_unbound, 0, &alloc->port); + + if (!ret && alloc->remote_dom == DOMID_QEMU) { + XenEvtchnPort *p = &s->port_table[alloc->port]; + p->u.interdomain.to_qemu = 1; + } qemu_mutex_unlock(&s->port_lock); @@ -1496,12 +1485,12 @@ int xen_evtchn_send_op(struct evtchn_send *send) switch (p->type) { case EVTCHNSTAT_interdomain: - if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) { + if (p->u.interdomain.to_qemu) { /* * This is an event from the guest to qemu itself, which is * serving as the driver domain. 
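/*
 * Editorial sketch of the wakeup mechanism used just below: when the
 * remote end is QEMU itself, the send op signals the backend handle
 * with eventfd_write(). The plain-POSIX equivalent of that handshake,
 * outside QEMU's event loop (illustrative only):
 */
#include <sys/eventfd.h>
#include <unistd.h>
#include <assert.h>

int main(void)
{
    int fd = eventfd(0, EFD_CLOEXEC);
    eventfd_t val = 0;

    assert(fd >= 0);
    eventfd_write(fd, 1);    /* sender: what the send op does */
    eventfd_read(fd, &val);  /* backend: wakes with val == 1 */
    assert(val == 1);
    close(fd);
    return 0;
}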
*/ - uint16_t be_port = p->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU; + uint16_t be_port = p->u.interdomain.port; struct xenevtchn_handle *xc = s->be_handles[be_port]; if (xc) { eventfd_write(xc->fd, 1); @@ -1511,7 +1500,7 @@ int xen_evtchn_send_op(struct evtchn_send *send) } } else { /* Loopback interdomain ports; just a complex IPI */ - set_port_pending(s, p->type_val); + set_port_pending(s, p->u.interdomain.port); } break; @@ -1553,8 +1542,7 @@ int xen_evtchn_set_port(uint16_t port) /* QEMU has no business sending to anything but these */ if (p->type == EVTCHNSTAT_virq || - (p->type == EVTCHNSTAT_interdomain && - (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU))) { + (p->type == EVTCHNSTAT_interdomain && p->u.interdomain.to_qemu)) { set_port_pending(s, port); ret = 0; } @@ -2064,7 +2052,7 @@ int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid, switch (gp->type) { case EVTCHNSTAT_interdomain: /* Allow rebinding after migration, preserve port # if possible */ - be_port = gp->type_val & ~PORT_INFO_TYPEVAL_REMOTE_QEMU; + be_port = gp->u.interdomain.port; assert(be_port != 0); if (!s->be_handles[be_port]) { s->be_handles[be_port] = xc; @@ -2085,7 +2073,8 @@ int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid, } gp->type = EVTCHNSTAT_interdomain; - gp->type_val = be_port | PORT_INFO_TYPEVAL_REMOTE_QEMU; + gp->u.interdomain.to_qemu = 1; + gp->u.interdomain.port = be_port; xc->guest_port = guest_port; if (kvm_xen_has_cap(EVTCHN_SEND)) { assign_kernel_eventfd(gp->type, guest_port, xc->fd); @@ -2130,7 +2119,7 @@ int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port) /* This should never *not* be true */ if (gp->type == EVTCHNSTAT_interdomain) { gp->type = EVTCHNSTAT_unbound; - gp->type_val = PORT_INFO_TYPEVAL_REMOTE_QEMU; + gp->u.interdomain.port = 0; } if (kvm_xen_has_cap(EVTCHN_SEND)) { @@ -2284,11 +2273,11 @@ EvtchnInfoList *qmp_xen_event_list(Error **errp) info->type = p->type; if (p->type == EVTCHNSTAT_interdomain) { - info->remote_domain = g_strdup((p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) ? + info->remote_domain = g_strdup(p->u.interdomain.to_qemu ? 
"qemu" : "loopback"); - info->target = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK; + info->target = p->u.interdomain.port; } else { - info->target = p->type_val; + info->target = p->u.val; /* pirq# or virq# */ } info->vcpu = p->vcpu; info->pending = test_bit(i, pending); diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c index 839ec920a1..0a24f53f20 100644 --- a/hw/i386/kvm/xen_gnttab.c +++ b/hw/i386/kvm/xen_gnttab.c @@ -25,6 +25,7 @@ #include "hw/xen/xen_backend_ops.h" #include "xen_overlay.h" #include "xen_gnttab.h" +#include "xen_primary_console.h" #include "sysemu/kvm.h" #include "sysemu/kvm_xen.h" @@ -537,9 +538,13 @@ int xen_gnttab_reset(void) s->nr_frames = 0; memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames); - s->entries.v1[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access; s->entries.v1[GNTTAB_RESERVED_XENSTORE].frame = XEN_SPECIAL_PFN(XENSTORE); + if (xen_primary_console_get_pfn()) { + s->entries.v1[GNTTAB_RESERVED_CONSOLE].flags = GTF_permit_access; + s->entries.v1[GNTTAB_RESERVED_CONSOLE].frame = XEN_SPECIAL_PFN(CONSOLE); + } + return 0; } diff --git a/hw/i386/kvm/xen_primary_console.c b/hw/i386/kvm/xen_primary_console.c new file mode 100644 index 0000000000..abe79f565b --- /dev/null +++ b/hw/i386/kvm/xen_primary_console.c @@ -0,0 +1,193 @@ +/* + * QEMU Xen emulation: Primary console support + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse <dwmw2@infradead.org> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" + +#include "hw/sysbus.h" +#include "hw/xen/xen.h" +#include "hw/xen/xen_backend_ops.h" +#include "xen_evtchn.h" +#include "xen_overlay.h" +#include "xen_primary_console.h" + +#include "sysemu/kvm.h" +#include "sysemu/kvm_xen.h" + +#include "trace.h" + +#include "hw/xen/interface/event_channel.h" +#include "hw/xen/interface/grant_table.h" + +#define TYPE_XEN_PRIMARY_CONSOLE "xen-primary-console" +OBJECT_DECLARE_SIMPLE_TYPE(XenPrimaryConsoleState, XEN_PRIMARY_CONSOLE) + +struct XenPrimaryConsoleState { + /*< private >*/ + SysBusDevice busdev; + /*< public >*/ + + MemoryRegion console_page; + void *cp; + + evtchn_port_t guest_port; + evtchn_port_t be_port; + + struct xengntdev_handle *gt; + void *granted_xs; +}; + +struct XenPrimaryConsoleState *xen_primary_console_singleton; + +static void xen_primary_console_realize(DeviceState *dev, Error **errp) +{ + XenPrimaryConsoleState *s = XEN_PRIMARY_CONSOLE(dev); + + if (xen_mode != XEN_EMULATE) { + error_setg(errp, "Xen primary console support is for Xen emulation"); + return; + } + + memory_region_init_ram(&s->console_page, OBJECT(dev), "xen:console_page", + XEN_PAGE_SIZE, &error_abort); + memory_region_set_enabled(&s->console_page, true); + s->cp = memory_region_get_ram_ptr(&s->console_page); + memset(s->cp, 0, XEN_PAGE_SIZE); + + /* We can't map it this early as KVM isn't ready */ + xen_primary_console_singleton = s; +} + +static void xen_primary_console_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xen_primary_console_realize; +} + +static const TypeInfo xen_primary_console_info = { + .name = TYPE_XEN_PRIMARY_CONSOLE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XenPrimaryConsoleState), + .class_init = xen_primary_console_class_init, +}; + + +void xen_primary_console_create(void) +{ + DeviceState *dev = 
sysbus_create_simple(TYPE_XEN_PRIMARY_CONSOLE, -1, NULL); + + trace_xen_primary_console_create(); + + xen_primary_console_singleton = XEN_PRIMARY_CONSOLE(dev); + + /* + * Defer the init (xen_primary_console_reset()) until KVM is set up and the + * overlay page can be mapped. + */ +} + +static void xen_primary_console_register_types(void) +{ + type_register_static(&xen_primary_console_info); +} + +type_init(xen_primary_console_register_types) + +uint16_t xen_primary_console_get_port(void) +{ + XenPrimaryConsoleState *s = xen_primary_console_singleton; + if (!s) { + return 0; + } + return s->guest_port; +} + +void xen_primary_console_set_be_port(uint16_t port) +{ + XenPrimaryConsoleState *s = xen_primary_console_singleton; + if (s) { + s->be_port = port; + } +} + +uint64_t xen_primary_console_get_pfn(void) +{ + XenPrimaryConsoleState *s = xen_primary_console_singleton; + if (!s) { + return 0; + } + return XEN_SPECIAL_PFN(CONSOLE); +} + +void *xen_primary_console_get_map(void) +{ + XenPrimaryConsoleState *s = xen_primary_console_singleton; + if (!s) { + return 0; + } + return s->cp; +} + +static void alloc_guest_port(XenPrimaryConsoleState *s) +{ + struct evtchn_alloc_unbound alloc = { + .dom = DOMID_SELF, + .remote_dom = DOMID_QEMU, + }; + + if (!xen_evtchn_alloc_unbound_op(&alloc)) { + s->guest_port = alloc.port; + } +} + +static void rebind_guest_port(XenPrimaryConsoleState *s) +{ + struct evtchn_bind_interdomain inter = { + .remote_dom = DOMID_QEMU, + .remote_port = s->be_port, + }; + + if (!xen_evtchn_bind_interdomain_op(&inter)) { + s->guest_port = inter.local_port; + } + + s->be_port = 0; +} + +int xen_primary_console_reset(void) +{ + XenPrimaryConsoleState *s = xen_primary_console_singleton; + if (!s) { + return 0; + } + + if (!memory_region_is_mapped(&s->console_page)) { + uint64_t gpa = XEN_SPECIAL_PFN(CONSOLE) << TARGET_PAGE_BITS; + xen_overlay_do_map_page(&s->console_page, gpa); + } + + if (s->be_port) { + rebind_guest_port(s); + } else { + alloc_guest_port(s); + } + + trace_xen_primary_console_reset(s->guest_port); + + s->gt = qemu_xen_gnttab_open(); + uint32_t xs_gntref = GNTTAB_RESERVED_CONSOLE; + s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref, + PROT_READ | PROT_WRITE); + + return 0; +} diff --git a/hw/i386/kvm/xen_primary_console.h b/hw/i386/kvm/xen_primary_console.h new file mode 100644 index 0000000000..7e2989ea0d --- /dev/null +++ b/hw/i386/kvm/xen_primary_console.h @@ -0,0 +1,23 @@ +/* + * QEMU Xen emulation: Primary console support + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse <dwmw2@infradead.org> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
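/*
 * Editorial sketch (not from the patch): how a console backend might
 * consume the accessors declared below. The function is hypothetical;
 * the in-tree consumer discovers the ring and port via the xenstore
 * nodes written elsewhere in this series.
 */
static void hypothetical_attach_primary_console(void)
{
    uint16_t port = xen_primary_console_get_port();
    void *ring = xen_primary_console_get_map();

    if (port && ring) {
        /*
         * Serve the console ring mapped at 'ring' and use event
         * channel 'port' for kicks in both directions.
         */
    }
}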
+ */ + +#ifndef QEMU_XEN_PRIMARY_CONSOLE_H +#define QEMU_XEN_PRIMARY_CONSOLE_H + +void xen_primary_console_create(void); +int xen_primary_console_reset(void); + +uint16_t xen_primary_console_get_port(void); +void xen_primary_console_set_be_port(uint16_t port); +uint64_t xen_primary_console_get_pfn(void); +void *xen_primary_console_get_map(void); + +#endif /* QEMU_XEN_PRIMARY_CONSOLE_H */ diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c index 8e716a7009..6e651960b3 100644 --- a/hw/i386/kvm/xen_xenstore.c +++ b/hw/i386/kvm/xen_xenstore.c @@ -25,6 +25,7 @@ #include "hw/xen/xen_backend_ops.h" #include "xen_overlay.h" #include "xen_evtchn.h" +#include "xen_primary_console.h" #include "xen_xenstore.h" #include "sysemu/kvm.h" @@ -331,7 +332,7 @@ static void xs_error(XenXenstoreState *s, unsigned int id, const char *errstr = NULL; for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) { - struct xsd_errors *xsd_error = &xsd_errors[i]; + const struct xsd_errors *xsd_error = &xsd_errors[i]; if (xsd_error->errnum == errnum) { errstr = xsd_error->errstring; @@ -1434,6 +1435,8 @@ static void alloc_guest_port(XenXenstoreState *s) int xen_xenstore_reset(void) { XenXenstoreState *s = xen_xenstore_singleton; + int console_port; + GList *perms; int err; if (!s) { @@ -1461,6 +1464,24 @@ int xen_xenstore_reset(void) } s->be_port = err; + /* Create frontend store nodes */ + perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU)); + perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid)); + + relpath_printf(s, perms, "store/port", "%u", s->guest_port); + relpath_printf(s, perms, "store/ring-ref", "%lu", + XEN_SPECIAL_PFN(XENSTORE)); + + console_port = xen_primary_console_get_port(); + if (console_port) { + relpath_printf(s, perms, "console/ring-ref", "%lu", + XEN_SPECIAL_PFN(CONSOLE)); + relpath_printf(s, perms, "console/port", "%u", console_port); + relpath_printf(s, perms, "console/state", "%u", XenbusStateInitialised); + } + + g_list_free_full(perms, g_free); + /* * We don't actually access the guest's page through the grant, because * this isn't real Xen, and we can just use the page we gave it in the diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 1aef21aa2c..188bc9d0f8 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -1261,7 +1261,7 @@ void pc_basic_device_init(struct PCMachineState *pcms, if (pcms->bus) { pci_create_simple(pcms->bus, -1, "xen-platform"); } - xen_bus_init(); + pcms->xenbus = xen_bus_init(); xen_be_init(); } #endif @@ -1289,7 +1289,8 @@ void pc_basic_device_init(struct PCMachineState *pcms, pcms->vmport != ON_OFF_AUTO_ON); } -void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) +void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus, + BusState *xen_bus) { MachineClass *mc = MACHINE_CLASS(pcmc); int i; @@ -1299,7 +1300,11 @@ void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) NICInfo *nd = &nd_table[i]; const char *model = nd->model ? 
nd->model : mc->default_nic; - if (g_str_equal(model, "ne2k_isa")) { + if (xen_bus && (!nd->model || g_str_equal(model, "xen-net-device"))) { + DeviceState *dev = qdev_new("xen-net-device"); + qdev_set_nic_properties(dev, nd); + qdev_realize_and_unref(dev, xen_bus, &error_fatal); + } else if (g_str_equal(model, "ne2k_isa")) { pc_init_ne2k_isa(isa_bus, nd); } else { pci_nic_init_nofail(nd, pci_bus, model, NULL); diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 26e161beb9..eace854335 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -342,7 +342,7 @@ static void pc_init1(MachineState *machine, pc_basic_device_init(pcms, isa_bus, x86ms->gsi, rtc_state, true, 0x4); - pc_nic_init(pcmc, isa_bus, pci_bus); + pc_nic_init(pcmc, isa_bus, pci_bus, pcms->xenbus); if (pcmc->pci_enabled) { pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state); diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 597943ff1b..4f3e5412f6 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -340,7 +340,7 @@ static void pc_q35_init(MachineState *machine) /* the rest devices to which pci devfn is automatically assigned */ pc_vga_init(isa_bus, host_bus); - pc_nic_init(pcmc, isa_bus, host_bus); + pc_nic_init(pcmc, isa_bus, host_bus, pcms->xenbus); if (machine->nvdimms_state->is_enabled) { nvdimm_init_acpi_state(machine->nvdimms_state, system_io, diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index 17457ff3de..ef7d3fc05f 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -140,9 +140,14 @@ static void unplug_nic(PCIBus *b, PCIDevice *d, void *o) /* Remove the peer of the NIC device. Normally, this would be a tap device. */ static void del_nic_peer(NICState *nic, void *opaque) { - NetClientState *nc; + NetClientState *nc = qemu_get_queue(nic); + ObjectClass *klass = module_object_class_by_name(nc->model); + + /* Only delete peers of PCI NICs that we're about to delete */ + if (!klass || !object_class_dynamic_cast(klass, TYPE_PCI_DEVICE)) { + return; + } - nc = qemu_get_queue(nic); if (nc->peer) qemu_del_net_client(nc->peer); } @@ -164,39 +169,60 @@ static void pci_unplug_nics(PCIBus *bus) * * [1] https://xenbits.xen.org/gitweb/?p=xen.git;a=blob;f=docs/misc/hvm-emulated-unplug.pandoc */ -static void pci_xen_ide_unplug(PCIDevice *d, bool aux) +struct ide_unplug_state { + bool aux; + int nr_unplugged; +}; + +static int ide_dev_unplug(DeviceState *dev, void *_st) { - DeviceState *dev = DEVICE(d); - PCIIDEState *pci_ide; - int i; + struct ide_unplug_state *st = _st; IDEDevice *idedev; IDEBus *idebus; BlockBackend *blk; + int unit; - pci_ide = PCI_IDE(dev); + idedev = IDE_DEVICE(object_dynamic_cast(OBJECT(dev), "ide-hd")); + if (!idedev) { + return 0; + } - for (i = aux ? 
1 : 0; i < 4; i++) { - idebus = &pci_ide->bus[i / 2]; - blk = idebus->ifs[i % 2].blk; + idebus = IDE_BUS(qdev_get_parent_bus(dev)); - if (blk && idebus->ifs[i % 2].drive_kind != IDE_CD) { - if (!(i % 2)) { - idedev = idebus->master; - } else { - idedev = idebus->slave; - } + unit = (idedev == idebus->slave); + assert(unit || idedev == idebus->master); - blk_drain(blk); - blk_flush(blk); + if (st->aux && !unit && !strcmp(BUS(idebus)->name, "ide.0")) { + return 0; + } - blk_detach_dev(blk, DEVICE(idedev)); - idebus->ifs[i % 2].blk = NULL; - idedev->conf.blk = NULL; - monitor_remove_blk(blk); - blk_unref(blk); - } + blk = idebus->ifs[unit].blk; + if (blk) { + blk_drain(blk); + blk_flush(blk); + + blk_detach_dev(blk, DEVICE(idedev)); + idebus->ifs[unit].blk = NULL; + idedev->conf.blk = NULL; + monitor_remove_blk(blk); + blk_unref(blk); + } + + object_unparent(OBJECT(dev)); + st->nr_unplugged++; + + return 0; +} + +static void pci_xen_ide_unplug(PCIDevice *d, bool aux) +{ + struct ide_unplug_state st = { aux, 0 }; + DeviceState *dev = DEVICE(d); + + qdev_walk_children(dev, NULL, NULL, ide_dev_unplug, NULL, &st); + if (st.nr_unplugged) { + pci_device_reset(d); } - pci_device_reset(d); } static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) @@ -211,6 +237,7 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) switch (pci_get_word(d->config + PCI_CLASS_DEVICE)) { case PCI_CLASS_STORAGE_IDE: + case PCI_CLASS_STORAGE_SATA: pci_xen_ide_unplug(d, aux); break; diff --git a/hw/ide/core.c b/hw/ide/core.c index b5e0dcd29b..63ba665f3d 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -2515,19 +2515,19 @@ static void ide_dummy_transfer_stop(IDEState *s) void ide_bus_reset(IDEBus *bus) { - bus->unit = 0; - bus->cmd = 0; - ide_reset(&bus->ifs[0]); - ide_reset(&bus->ifs[1]); - ide_clear_hob(bus); - - /* pending async DMA */ + /* pending async DMA - needs the IDEState before it is reset */ if (bus->dma->aiocb) { trace_ide_bus_reset_aio(); blk_aio_cancel(bus->dma->aiocb); bus->dma->aiocb = NULL; } + bus->unit = 0; + bus->cmd = 0; + ide_reset(&bus->ifs[0]); + ide_reset(&bus->ifs[1]); + ide_clear_hob(bus); + /* reset dma provider too */ if (bus->dma->ops->reset) { bus->dma->ops->reset(bus->dma); diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c index 79ffbb52a0..203b92c264 100644 --- a/hw/isa/i82378.c +++ b/hw/isa/i82378.c @@ -105,7 +105,9 @@ static void i82378_realize(PCIDevice *pci, Error **errp) /* speaker */ pcspk = isa_new(TYPE_PC_SPEAKER); object_property_set_link(OBJECT(pcspk), "pit", OBJECT(pit), &error_fatal); - isa_realize_and_unref(pcspk, isabus, &error_fatal); + if (!isa_realize_and_unref(pcspk, isabus, errp)) { + return; + } /* 2 82C37 (dma) */ isa_create_simple(isabus, "i82374"); diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index c02be4ce45..52647b4ac7 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -23,6 +23,7 @@ #include "qemu/pmem.h" #include "qemu/range.h" #include "qemu/rcu.h" +#include "qemu/guest-random.h" #include "sysemu/hostmem.h" #include "sysemu/numa.h" #include "hw/cxl/cxl.h" @@ -208,10 +209,9 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) } if (nonvolatile_mr) { + uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0; rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++, - nonvolatile_mr, true, - (volatile_mr ? 
- memory_region_size(volatile_mr) : 0)); + nonvolatile_mr, true, base); if (rc < 0) { goto error_cleanup; } @@ -514,7 +514,8 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, case A_CXL_RAS_UNC_ERR_STATUS: { uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL); - uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER); + uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL, + FIRST_ERROR_POINTER); CXLError *cxl_err; uint32_t unc_err; @@ -533,7 +534,8 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, * closest to behavior of hardware not capable of multiple * header recording. */ - QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node, cxl_next) { + QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node, + cxl_next) { if ((1 << cxl_err->type) & value) { QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node); g_free(cxl_err); @@ -715,7 +717,8 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) pci_dev, CXL_COMPONENT_REG_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr); - cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate); + cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate, + &ct3d->cci); pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, @@ -885,32 +888,43 @@ static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d, MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, unsigned size, MemTxAttrs attrs) { + CXLType3Dev *ct3d = CXL_TYPE3(d); uint64_t dpa_offset = 0; AddressSpace *as = NULL; int res; - res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size, + res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size, &as, &dpa_offset); if (res) { return MEMTX_ERROR; } + if (sanitize_running(&ct3d->cci)) { + qemu_guest_getrandom_nofail(data, size); + return MEMTX_OK; + } + return address_space_read(as, dpa_offset, attrs, data, size); } MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, unsigned size, MemTxAttrs attrs) { + CXLType3Dev *ct3d = CXL_TYPE3(d); uint64_t dpa_offset = 0; AddressSpace *as = NULL; int res; - res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size, + res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size, &as, &dpa_offset); if (res) { return MEMTX_ERROR; } + if (sanitize_running(&ct3d->cci)) { + return MEMTX_OK; + } + return address_space_write(as, dpa_offset, attrs, &data, size); } @@ -921,7 +935,18 @@ static void ct3d_reset(DeviceState *dev) uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask; cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE); - cxl_device_register_init_common(&ct3d->cxl_dstate); + cxl_device_register_init_t3(ct3d); + + /* + * Bring up an endpoint to target with MCTP over VDM. + * This device is emulating an MLD with single LD for now. 
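
A note on the cxl_type3_read()/cxl_type3_write() hunks above: while the device's CCI has a sanitize command in flight, guest reads of HDM-backed memory are filled from qemu_guest_getrandom_nofail() and guest writes are acknowledged but discarded, so the contents being scrubbed are never observable. A freestanding sketch of that gating pattern; sanitize_running(), the MemTx names and the rand()-based noise source are stand-ins for the QEMU/CXL definitions, not the real ones:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef enum { MEMTX_OK, MEMTX_ERROR } MemTxResult;

static bool sanitize_running(void) { return true; } /* stub for the CCI check */

/* Read path: never expose data mid-sanitize; return noise instead. */
static MemTxResult hdm_read(uint8_t *data, size_t size)
{
    if (sanitize_running()) {
        for (size_t i = 0; i < size; i++) {
            data[i] = rand() & 0xff;
        }
        return MEMTX_OK;
    }
    /* normal address_space_read() path would run here */
    return MEMTX_OK;
}

/* Write path: accept and drop; the backing store is being erased anyway. */
static MemTxResult hdm_write(const uint8_t *data, size_t size)
{
    (void)data;
    (void)size;
    if (sanitize_running()) {
        return MEMTX_OK;
    }
    /* normal address_space_write() path would run here */
    return MEMTX_OK;
}

Returning MEMTX_OK for the dropped write, rather than MEMTX_ERROR, keeps the guest from taking bus faults for accesses that are architecturally permitted while sanitize runs.
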
+ */ + cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci, + DEVICE(ct3d), DEVICE(ct3d), + 512); /* Max payload made up */ + cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d), + 512); /* Max payload made up */ + } static Property ct3_props[] = { @@ -1072,7 +1097,8 @@ void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, if (((start >= p->start) && (start < p->start + p->length)) || ((start + length > p->start) && (start + length <= p->start + p->length))) { - error_setg(errp, "Overlap with existing poisoned region not supported"); + error_setg(errp, + "Overlap with existing poisoned region not supported"); return; } } @@ -1085,7 +1111,8 @@ void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, p = g_new0(CXLPoison, 1); p->length = length; p->start = start; - p->type = CXL_POISON_TYPE_INTERNAL; /* Different from injected via the mbox */ + /* Different from injected via the mbox */ + p->type = CXL_POISON_TYPE_INTERNAL; QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); ct3d->poison_list_cnt++; @@ -1222,7 +1249,8 @@ void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type, return; } /* If the error is masked, nothting to do here */ - if (!((1 << cxl_err_type) & ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) { + if (!((1 << cxl_err_type) & + ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) { return; } @@ -1372,7 +1400,8 @@ void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags, bool has_bank, uint8_t bank, bool has_row, uint32_t row, bool has_column, uint16_t column, - bool has_correction_mask, uint64List *correction_mask, + bool has_correction_mask, + uint64List *correction_mask, Error **errp) { Object *obj = object_resolve_path(path, NULL); @@ -1473,7 +1502,7 @@ void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log, int16_t temperature, uint32_t dirty_shutdown_count, uint32_t corrected_volatile_error_count, - uint32_t corrected_persistent_error_count, + uint32_t corrected_persist_error_count, Error **errp) { Object *obj = object_resolve_path(path, NULL); @@ -1513,8 +1542,10 @@ void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log, module.life_used = life_used; stw_le_p(&module.temperature, temperature); stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count); - stl_le_p(&module.corrected_volatile_error_count, corrected_volatile_error_count); - stl_le_p(&module.corrected_persistent_error_count, corrected_persistent_error_count); + stl_le_p(&module.corrected_volatile_error_count, + corrected_volatile_error_count); + stl_le_p(&module.corrected_persistent_error_count, + corrected_persist_error_count); if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) { cxl_event_irq_assert(ct3d); diff --git a/hw/mem/cxl_type3_stubs.c b/hw/mem/cxl_type3_stubs.c index 8ba5d3d1f7..3e1851e32b 100644 --- a/hw/mem/cxl_type3_stubs.c +++ b/hw/mem/cxl_type3_stubs.c @@ -33,7 +33,8 @@ void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags, bool has_bank, uint8_t bank, bool has_row, uint32_t row, bool has_column, uint16_t column, - bool has_correction_mask, uint64List *correction_mask, + bool has_correction_mask, + uint64List *correction_mask, Error **errp) {} void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log, @@ -45,7 +46,7 @@ void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log, int16_t temperature, uint32_t dirty_shutdown_count, uint32_t 
corrected_volatile_error_count, - uint32_t corrected_persistent_error_count, + uint32_t corrected_persist_error_count, Error **errp) {} void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig index ac1eb06a51..66ec536e06 100644 --- a/hw/mips/Kconfig +++ b/hw/mips/Kconfig @@ -33,6 +33,7 @@ config JAZZ config FULOONG bool select PCI_BONITO + select VT82C686 config LOONGSON3V bool diff --git a/hw/net/meson.build b/hw/net/meson.build index 2632634df3..f64651c467 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -1,5 +1,5 @@ system_ss.add(when: 'CONFIG_DP8393X', if_true: files('dp8393x.c')) -system_ss.add(when: 'CONFIG_XEN', if_true: files('xen_nic.c')) +system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen_nic.c')) system_ss.add(when: 'CONFIG_NE2000_COMMON', if_true: files('ne2000.c')) # PCI network cards diff --git a/hw/net/trace-events b/hw/net/trace-events index 3abfd65e5b..3097742cc0 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -482,3 +482,14 @@ dp8393x_receive_oversize(int size) "oversize packet, pkt_size is %d" dp8393x_receive_not_netcard(void) "packet not for netcard" dp8393x_receive_packet(int crba) "Receive packet at 0x%"PRIx32 dp8393x_receive_write_status(int crba) "Write status at 0x%"PRIx32 + +# xen_nic.c +xen_netdev_realize(int dev, const char *info, const char *peer) "vif%u info '%s' peer '%s'" +xen_netdev_unrealize(int dev) "vif%u" +xen_netdev_create(int dev) "vif%u" +xen_netdev_destroy(int dev) "vif%u" +xen_netdev_disconnect(int dev) "vif%u" +xen_netdev_connect(int dev, unsigned int tx, unsigned int rx, int port) "vif%u tx %u rx %u port %u" +xen_netdev_frontend_changed(const char *dev, int state) "vif%s state %d" +xen_netdev_tx(int dev, int ref, int off, int len, unsigned int flags, const char *c, const char *d, const char *m, const char *e) "vif%u ref %u off %u len %u flags 0x%x%s%s%s%s" +xen_netdev_rx(int dev, int idx, int status, int flags) "vif%u idx %d status %d flags 0x%x" diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 9bbf6599fc..af4ba3f1e6 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -20,6 +20,13 @@ */ #include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/qemu-print.h" +#include "qapi/qmp/qdict.h" +#include "qapi/error.h" + #include <sys/socket.h> #include <sys/ioctl.h> #include <sys/wait.h> @@ -27,18 +34,26 @@ #include "net/net.h" #include "net/checksum.h" #include "net/util.h" -#include "hw/xen/xen-legacy-backend.h" + +#include "hw/xen/xen-backend.h" +#include "hw/xen/xen-bus-helper.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/xen/interface/io/netif.h" +#include "hw/xen/interface/io/xs_wire.h" + +#include "trace.h" /* ------------------------------------------------------------- */ struct XenNetDev { - struct XenLegacyDevice xendev; /* must be first */ - char *mac; + struct XenDevice xendev; /* must be first */ + XenEventChannel *event_channel; + int dev; int tx_work; - int tx_ring_ref; - int rx_ring_ref; + unsigned int tx_ring_ref; + unsigned int rx_ring_ref; struct netif_tx_sring *txs; struct netif_rx_sring *rxs; netif_tx_back_ring_t tx_ring; @@ -47,6 +62,11 @@ struct XenNetDev { NICState *nic; }; +typedef struct XenNetDev XenNetDev; + +#define TYPE_XEN_NET_DEVICE "xen-net-device" +OBJECT_DECLARE_SIMPLE_TYPE(XenNetDev, XEN_NET_DEVICE) + /* ------------------------------------------------------------- */ static void net_tx_response(struct 
XenNetDev *netdev, netif_tx_request_t *txp, int8_t st) @@ -68,7 +88,8 @@ static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, i netdev->tx_ring.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify); if (notify) { - xen_pv_send_notify(&netdev->xendev); + xen_device_notify_event_channel(XEN_DEVICE(netdev), + netdev->event_channel, NULL); } if (i == netdev->tx_ring.req_cons) { @@ -104,13 +125,16 @@ static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING #endif } -static void net_tx_packets(struct XenNetDev *netdev) +static bool net_tx_packets(struct XenNetDev *netdev) { + bool done_something = false; netif_tx_request_t txreq; RING_IDX rc, rp; void *page; void *tmpbuf = NULL; + assert(qemu_mutex_iothread_locked()); + for (;;) { rc = netdev->tx_ring.req_cons; rp = netdev->tx_ring.sring->req_prod; @@ -122,49 +146,52 @@ static void net_tx_packets(struct XenNetDev *netdev) } memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq)); netdev->tx_ring.req_cons = ++rc; + done_something = true; #if 1 /* should not happen in theory, we don't announce the * * feature-{sg,gso,whatelse} flags in xenstore (yet?) */ if (txreq.flags & NETTXF_extra_info) { - xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n"); + qemu_log_mask(LOG_UNIMP, "vif%u: FIXME: extra info flag\n", + netdev->dev); net_tx_error(netdev, &txreq, rc); continue; } if (txreq.flags & NETTXF_more_data) { - xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n"); + qemu_log_mask(LOG_UNIMP, "vif%u: FIXME: more data flag\n", + netdev->dev); net_tx_error(netdev, &txreq, rc); continue; } #endif if (txreq.size < 14) { - xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n", - txreq.size); + qemu_log_mask(LOG_GUEST_ERROR, "vif%u: bad packet size: %d\n", + netdev->dev, txreq.size); net_tx_error(netdev, &txreq, rc); continue; } if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) { - xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n"); + qemu_log_mask(LOG_GUEST_ERROR, "vif%u: error: page crossing\n", + netdev->dev); net_tx_error(netdev, &txreq, rc); continue; } - xen_pv_printf(&netdev->xendev, 3, - "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n", - txreq.gref, txreq.offset, txreq.size, txreq.flags, - (txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "", - (txreq.flags & NETTXF_data_validated) ? " data_validated" : "", - (txreq.flags & NETTXF_more_data) ? " more_data" : "", - (txreq.flags & NETTXF_extra_info) ? " extra_info" : ""); + trace_xen_netdev_tx(netdev->dev, txreq.gref, txreq.offset, + txreq.size, txreq.flags, + (txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "", + (txreq.flags & NETTXF_data_validated) ? " data_validated" : "", + (txreq.flags & NETTXF_more_data) ? " more_data" : "", + (txreq.flags & NETTXF_extra_info) ? 
" extra_info" : ""); - page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref, - PROT_READ); + page = xen_device_map_grant_refs(&netdev->xendev, &txreq.gref, 1, + PROT_READ, NULL); if (page == NULL) { - xen_pv_printf(&netdev->xendev, 0, - "error: tx gref dereference failed (%d)\n", - txreq.gref); + qemu_log_mask(LOG_GUEST_ERROR, + "vif%u: tx gref dereference failed (%d)\n", + netdev->dev, txreq.gref); net_tx_error(netdev, &txreq, rc); continue; } @@ -181,7 +208,8 @@ static void net_tx_packets(struct XenNetDev *netdev) qemu_send_packet(qemu_get_queue(netdev->nic), page + txreq.offset, txreq.size); } - xen_be_unmap_grant_ref(&netdev->xendev, page, txreq.gref); + xen_device_unmap_grant_refs(&netdev->xendev, page, &txreq.gref, 1, + NULL); net_tx_response(netdev, &txreq, NETIF_RSP_OKAY); } if (!netdev->tx_work) { @@ -190,6 +218,7 @@ static void net_tx_packets(struct XenNetDev *netdev) netdev->tx_work = 0; } g_free(tmpbuf); + return done_something; } /* ------------------------------------------------------------- */ @@ -212,14 +241,13 @@ static void net_rx_response(struct XenNetDev *netdev, resp->status = (int16_t)st; } - xen_pv_printf(&netdev->xendev, 3, - "rx response: idx %d, status %d, flags 0x%x\n", - i, resp->status, resp->flags); + trace_xen_netdev_rx(netdev->dev, i, resp->status, resp->flags); netdev->rx_ring.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify); if (notify) { - xen_pv_send_notify(&netdev->xendev); + xen_device_notify_event_channel(XEN_DEVICE(netdev), + netdev->event_channel, NULL); } } @@ -232,7 +260,9 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size RING_IDX rc, rp; void *page; - if (netdev->xendev.be_state != XenbusStateConnected) { + assert(qemu_mutex_iothread_locked()); + + if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) { return -1; } @@ -244,24 +274,26 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size return 0; } if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) { - xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", - (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN); + qemu_log_mask(LOG_GUEST_ERROR, "vif%u: packet too big (%lu > %ld)", + netdev->dev, (unsigned long)size, + XEN_PAGE_SIZE - NET_IP_ALIGN); return -1; } memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq)); netdev->rx_ring.req_cons = ++rc; - page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE); + page = xen_device_map_grant_refs(&netdev->xendev, &rxreq.gref, 1, + PROT_WRITE, NULL); if (page == NULL) { - xen_pv_printf(&netdev->xendev, 0, - "error: rx gref dereference failed (%d)\n", - rxreq.gref); + qemu_log_mask(LOG_GUEST_ERROR, + "vif%u: rx gref dereference failed (%d)\n", + netdev->dev, rxreq.gref); net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0); return -1; } memcpy(page + NET_IP_ALIGN, buf, size); - xen_be_unmap_grant_ref(&netdev->xendev, page, rxreq.gref); + xen_device_unmap_grant_refs(&netdev->xendev, page, &rxreq.gref, 1, NULL); net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0); return size; @@ -275,139 +307,361 @@ static NetClientInfo net_xen_info = { .receive = net_rx_packet, }; -static int net_init(struct XenLegacyDevice *xendev) +static void xen_netdev_realize(XenDevice *xendev, Error **errp) { - struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + ERRP_GUARD(); + XenNetDev *netdev = XEN_NET_DEVICE(xendev); + NetClientState *nc; - /* read xenstore entries */ - if 
(netdev->mac == NULL) { - netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac"); - } - - /* do we have all we need? */ - if (netdev->mac == NULL) { - return -1; - } + qemu_macaddr_default_if_unset(&netdev->conf.macaddr); - if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) { - return -1; - } + xen_device_frontend_printf(xendev, "mac", "%02x:%02x:%02x:%02x:%02x:%02x", + netdev->conf.macaddr.a[0], + netdev->conf.macaddr.a[1], + netdev->conf.macaddr.a[2], + netdev->conf.macaddr.a[3], + netdev->conf.macaddr.a[4], + netdev->conf.macaddr.a[5]); netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf, - "xen", NULL, netdev); + object_get_typename(OBJECT(xendev)), + DEVICE(xendev)->id, netdev); - qemu_set_info_str(qemu_get_queue(netdev->nic), - "nic: xenbus vif macaddr=%s", netdev->mac); + nc = qemu_get_queue(netdev->nic); + qemu_format_nic_info_str(nc, netdev->conf.macaddr.a); /* fill info */ - xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1); - xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0); + xen_device_backend_printf(xendev, "feature-rx-copy", "%u", 1); + xen_device_backend_printf(xendev, "feature-rx-flip", "%u", 0); - return 0; + trace_xen_netdev_realize(netdev->dev, nc->info_str, nc->peer ? + nc->peer->name : "(none)"); } -static int net_connect(struct XenLegacyDevice *xendev) +static bool net_event(void *_xendev) { - struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); - int rx_copy; + XenNetDev *netdev = XEN_NET_DEVICE(_xendev); + bool done_something; - if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref", - &netdev->tx_ring_ref) == -1) { - return -1; + done_something = net_tx_packets(netdev); + qemu_flush_queued_packets(qemu_get_queue(netdev->nic)); + return done_something; +} + +static bool xen_netdev_connect(XenDevice *xendev, Error **errp) +{ + XenNetDev *netdev = XEN_NET_DEVICE(xendev); + unsigned int port, rx_copy; + + assert(qemu_mutex_iothread_locked()); + + if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u", + &netdev->tx_ring_ref) != 1) { + error_setg(errp, "failed to read tx-ring-ref"); + return false; } - if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref", - &netdev->rx_ring_ref) == -1) { - return 1; + + if (xen_device_frontend_scanf(xendev, "rx-ring-ref", "%u", + &netdev->rx_ring_ref) != 1) { + error_setg(errp, "failed to read rx-ring-ref"); + return false; } - if (xenstore_read_fe_int(&netdev->xendev, "event-channel", - &netdev->xendev.remote_port) == -1) { - return -1; + + if (xen_device_frontend_scanf(xendev, "event-channel", "%u", + &port) != 1) { + error_setg(errp, "failed to read event-channel"); + return false; } - if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) { + if (xen_device_frontend_scanf(xendev, "request-rx-copy", "%u", + &rx_copy) != 1) { rx_copy = 0; } if (rx_copy == 0) { - xen_pv_printf(&netdev->xendev, 0, - "frontend doesn't support rx-copy.\n"); - return -1; + error_setg(errp, "frontend doesn't support rx-copy"); + return false; } - netdev->txs = xen_be_map_grant_ref(&netdev->xendev, - netdev->tx_ring_ref, - PROT_READ | PROT_WRITE); + netdev->txs = xen_device_map_grant_refs(xendev, + &netdev->tx_ring_ref, 1, + PROT_READ | PROT_WRITE, + errp); if (!netdev->txs) { - return -1; + error_prepend(errp, "failed to map tx grant ref: "); + return false; } - netdev->rxs = xen_be_map_grant_ref(&netdev->xendev, - netdev->rx_ring_ref, - PROT_READ | PROT_WRITE); + + netdev->rxs = xen_device_map_grant_refs(xendev, + &netdev->rx_ring_ref, 1, + PROT_READ | 
PROT_WRITE, + errp); if (!netdev->rxs) { - xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs, - netdev->tx_ring_ref); - netdev->txs = NULL; - return -1; + error_prepend(errp, "failed to map rx grant ref: "); + return false; } + BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE); BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE); - xen_be_bind_evtchn(&netdev->xendev); + netdev->event_channel = xen_device_bind_event_channel(xendev, port, + net_event, + netdev, + errp); + if (!netdev->event_channel) { + return false; + } - xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, " - "remote port %d, local port %d\n", - netdev->tx_ring_ref, netdev->rx_ring_ref, - netdev->xendev.remote_port, netdev->xendev.local_port); + trace_xen_netdev_connect(netdev->dev, netdev->tx_ring_ref, + netdev->rx_ring_ref, port); net_tx_packets(netdev); - return 0; + return true; } -static void net_disconnect(struct XenLegacyDevice *xendev) +static void xen_netdev_disconnect(XenDevice *xendev, Error **errp) { - struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + XenNetDev *netdev = XEN_NET_DEVICE(xendev); + + trace_xen_netdev_disconnect(netdev->dev); + + assert(qemu_mutex_iothread_locked()); - xen_pv_unbind_evtchn(&netdev->xendev); + netdev->tx_ring.sring = NULL; + netdev->rx_ring.sring = NULL; + if (netdev->event_channel) { + xen_device_unbind_event_channel(xendev, netdev->event_channel, + errp); + netdev->event_channel = NULL; + } if (netdev->txs) { - xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs, - netdev->tx_ring_ref); + xen_device_unmap_grant_refs(xendev, netdev->txs, + &netdev->tx_ring_ref, 1, errp); netdev->txs = NULL; } if (netdev->rxs) { - xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs, - netdev->rx_ring_ref); + xen_device_unmap_grant_refs(xendev, netdev->rxs, + &netdev->rx_ring_ref, 1, errp); netdev->rxs = NULL; } } -static void net_event(struct XenLegacyDevice *xendev) +/* -------------------------------------------------------------------- */ + + +static void xen_netdev_frontend_changed(XenDevice *xendev, + enum xenbus_state frontend_state, + Error **errp) { - struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); - net_tx_packets(netdev); - qemu_flush_queued_packets(qemu_get_queue(netdev->nic)); + ERRP_GUARD(); + enum xenbus_state backend_state = xen_device_backend_get_state(xendev); + + trace_xen_netdev_frontend_changed(xendev->name, frontend_state); + + switch (frontend_state) { + case XenbusStateConnected: + if (backend_state == XenbusStateConnected) { + break; + } + + xen_netdev_disconnect(xendev, errp); + if (*errp) { + break; + } + + if (!xen_netdev_connect(xendev, errp)) { + xen_netdev_disconnect(xendev, NULL); + xen_device_backend_set_state(xendev, XenbusStateClosing); + break; + } + + xen_device_backend_set_state(xendev, XenbusStateConnected); + break; + + case XenbusStateClosing: + xen_device_backend_set_state(xendev, XenbusStateClosing); + break; + + case XenbusStateClosed: + case XenbusStateUnknown: + xen_netdev_disconnect(xendev, errp); + if (*errp) { + break; + } + + xen_device_backend_set_state(xendev, XenbusStateClosed); + break; + + case XenbusStateInitialised: + /* + * Linux netback does nothing on the frontend going (back) to + * XenbusStateInitialised, so do the same here. 
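
Taken together, xen_netdev_frontend_changed() is the backend half of the xenbus handshake: a frontend move to Connected triggers a clean disconnect/reconnect of the rings, Closing is simply acknowledged, and Closed/Unknown release everything. Boiled down to a freestanding state function (xenbus_state values per the canonical io/xenbus.h; be_connect()/be_disconnect() stand in for xen_netdev_connect()/xen_netdev_disconnect(), and error propagation is omitted):

#include <stdbool.h>
#include <stdio.h>

/* Subset of enum xenbus_state from the Xen public headers. */
enum xenbus_state {
    XenbusStateUnknown     = 0,
    XenbusStateInitialised = 3,
    XenbusStateConnected   = 4,
    XenbusStateClosing     = 5,
    XenbusStateClosed      = 6,
};

static bool be_connect(void)    { puts("map rings, bind event channel"); return true; }
static void be_disconnect(void) { puts("unmap rings, unbind event channel"); }

/* Returns the new backend state for a given frontend transition. */
static enum xenbus_state on_frontend_changed(enum xenbus_state fe,
                                             enum xenbus_state be)
{
    switch (fe) {
    case XenbusStateConnected:
        if (be == XenbusStateConnected) {
            return be;                      /* already up, nothing to do */
        }
        be_disconnect();                    /* reconnect from a clean slate */
        return be_connect() ? XenbusStateConnected : XenbusStateClosing;
    case XenbusStateClosing:
        return XenbusStateClosing;          /* acknowledge the shutdown */
    case XenbusStateClosed:
    case XenbusStateUnknown:
        be_disconnect();
        return XenbusStateClosed;
    case XenbusStateInitialised:            /* Linux netback ignores this */
    default:
        return be;
    }
}
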
+ */ + default: + break; + } } -static int net_free(struct XenLegacyDevice *xendev) +static char *xen_netdev_get_name(XenDevice *xendev, Error **errp) { - struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + XenNetDev *netdev = XEN_NET_DEVICE(xendev); + + if (netdev->dev == -1) { + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + char fe_path[XENSTORE_ABS_PATH_MAX + 1]; + int idx = (xen_mode == XEN_EMULATE) ? 0 : 1; + char *value; + + /* Theoretically we could go up to INT_MAX here but that's overkill */ + while (idx < 100) { + snprintf(fe_path, sizeof(fe_path), + "/local/domain/%u/device/vif/%u", + xendev->frontend_id, idx); + value = qemu_xen_xs_read(xenbus->xsh, XBT_NULL, fe_path, NULL); + if (!value) { + if (errno == ENOENT) { + netdev->dev = idx; + goto found; + } + error_setg(errp, "cannot read %s: %s", fe_path, + strerror(errno)); + return NULL; + } + free(value); + idx++; + } + error_setg(errp, "cannot find device index for netdev device"); + return NULL; + } + found: + return g_strdup_printf("%u", netdev->dev); +} + +static void xen_netdev_unrealize(XenDevice *xendev) +{ + XenNetDev *netdev = XEN_NET_DEVICE(xendev); + + trace_xen_netdev_unrealize(netdev->dev); + + /* Disconnect from the frontend in case this has not already happened */ + xen_netdev_disconnect(xendev, NULL); if (netdev->nic) { qemu_del_nic(netdev->nic); - netdev->nic = NULL; } - g_free(netdev->mac); - netdev->mac = NULL; - return 0; } /* ------------------------------------------------------------- */ -struct XenDevOps xen_netdev_ops = { - .size = sizeof(struct XenNetDev), - .flags = DEVOPS_FLAG_NEED_GNTDEV, - .init = net_init, - .initialise = net_connect, - .event = net_event, - .disconnect = net_disconnect, - .free = net_free, +static Property xen_netdev_properties[] = { + DEFINE_NIC_PROPERTIES(XenNetDev, conf), + DEFINE_PROP_INT32("idx", XenNetDev, dev, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xen_netdev_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dev_class = DEVICE_CLASS(class); + XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class); + + xendev_class->backend = "qnic"; + xendev_class->device = "vif"; + xendev_class->get_name = xen_netdev_get_name; + xendev_class->realize = xen_netdev_realize; + xendev_class->frontend_changed = xen_netdev_frontend_changed; + xendev_class->unrealize = xen_netdev_unrealize; + set_bit(DEVICE_CATEGORY_NETWORK, dev_class->categories); + dev_class->user_creatable = true; + + device_class_set_props(dev_class, xen_netdev_properties); +} + +static const TypeInfo xen_net_type_info = { + .name = TYPE_XEN_NET_DEVICE, + .parent = TYPE_XEN_DEVICE, + .instance_size = sizeof(XenNetDev), + .class_init = xen_netdev_class_init, +}; + +static void xen_net_register_types(void) +{ + type_register_static(&xen_net_type_info); +} + +type_init(xen_net_register_types) + +/* Called to instantiate a XenNetDev when the backend is detected. 
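
Creation therefore comes from two directions: the xen_net_device_create()/xen_net_device_destroy() pair below reacts to a "qnic" backend area appearing in XenStore, while the pc_nic_init() hunk at the top of this series builds the device directly for a default NIC. The latter path is ordinary qdev plumbing; a hand-rolled equivalent, as a sketch that only compiles inside the QEMU tree (headers and the NICInfo/BusState arguments are assumed from context):

#include "qemu/osdep.h"
#include "net/net.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"

static void create_xen_nic(NICInfo *nd, BusState *xen_bus)
{
    DeviceState *dev = qdev_new("xen-net-device");

    qdev_prop_set_int32(dev, "idx", -1);   /* -1: claim the first free vif */
    qdev_set_nic_properties(dev, nd);      /* MAC and netdev peer, as in pc.c */
    qdev_realize_and_unref(dev, xen_bus, &error_fatal);
}

With idx left at -1, xen_netdev_get_name() probes /local/domain/<frontend-domid>/device/vif/<n> (n starting at 0 under XEN_EMULATE, 1 otherwise) until qemu_xen_xs_read() fails with ENOENT, and claims that slot as the device name.
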
*/ +static void xen_net_device_create(XenBackendInstance *backend, + QDict *opts, Error **errp) +{ + ERRP_GUARD(); + XenBus *xenbus = xen_backend_get_bus(backend); + const char *name = xen_backend_get_name(backend); + XenDevice *xendev = NULL; + unsigned long number; + const char *macstr; + XenNetDev *net; + MACAddr mac; + + if (qemu_strtoul(name, NULL, 10, &number) || number >= INT_MAX) { + error_setg(errp, "failed to parse name '%s'", name); + goto fail; + } + + trace_xen_netdev_create(number); + + macstr = qdict_get_try_str(opts, "mac"); + if (macstr == NULL) { + error_setg(errp, "no MAC address found"); + goto fail; + } + + if (net_parse_macaddr(mac.a, macstr) < 0) { + error_setg(errp, "failed to parse MAC address"); + goto fail; + } + + xendev = XEN_DEVICE(qdev_new(TYPE_XEN_NET_DEVICE)); + net = XEN_NET_DEVICE(xendev); + + net->dev = number; + memcpy(&net->conf.macaddr, &mac, sizeof(mac)); + + if (qdev_realize_and_unref(DEVICE(xendev), BUS(xenbus), errp)) { + xen_backend_set_device(backend, xendev); + return; + } + + error_prepend(errp, "realization of net device %lu failed: ", + number); + + fail: + if (xendev) { + object_unparent(OBJECT(xendev)); + } +} + +static void xen_net_device_destroy(XenBackendInstance *backend, + Error **errp) +{ + ERRP_GUARD(); + XenDevice *xendev = xen_backend_get_device(backend); + XenNetDev *netdev = XEN_NET_DEVICE(xendev); + + trace_xen_netdev_destroy(netdev->dev); + + object_unparent(OBJECT(xendev)); +} + +static const XenBackendInfo xen_net_backend_info = { + .type = "qnic", + .create = xen_net_device_create, + .destroy = xen_net_device_destroy, }; + +static void xen_net_register_backend(void) +{ + xen_backend_register(&xen_net_backend_info); +} + +xen_backend_init(xen_net_register_backend); diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c index 5a2b749c8e..405a133eef 100644 --- a/hw/pci-bridge/cxl_downstream.c +++ b/hw/pci-bridge/cxl_downstream.c @@ -13,6 +13,7 @@ #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "hw/pci/pcie_port.h" +#include "hw/cxl/cxl.h" #include "qapi/error.h" typedef struct CXLDownstreamPort { @@ -23,9 +24,6 @@ typedef struct CXLDownstreamPort { CXLComponentState cxl_cstate; } CXLDownstreamPort; -#define TYPE_CXL_DSP "cxl-downstream" -DECLARE_INSTANCE_CHECKER(CXLDownstreamPort, CXL_DSP, TYPE_CXL_DSP) - #define CXL_DOWNSTREAM_PORT_MSI_OFFSET 0x70 #define CXL_DOWNSTREAM_PORT_MSI_NR_VECTOR 1 #define CXL_DOWNSTREAM_PORT_EXP_OFFSET 0x90 @@ -98,7 +96,7 @@ static void build_dvsecs(CXLComponentState *cxl) { uint8_t *dvsec; - dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ 0 }; + dvsec = (uint8_t *)&(CXLDVSECPortExt){ 0 }; cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT, EXTENSIONS_PORT_DVSEC_LENGTH, EXTENSIONS_PORT_DVSEC, @@ -212,6 +210,19 @@ static void cxl_dsp_exitfn(PCIDevice *d) pci_bridge_exitfn(d); } +static void cxl_dsp_instance_post_init(Object *obj) +{ + PCIESlot *s = PCIE_SLOT(obj); + + if (!s->speed) { + s->speed = QEMU_PCI_EXP_LNK_2_5GT; + } + + if (!s->width) { + s->width = QEMU_PCI_EXP_LNK_X1; + } +} + static void cxl_dsp_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -232,6 +243,7 @@ static const TypeInfo cxl_dsp_info = { .name = TYPE_CXL_DSP, .instance_size = sizeof(CXLDownstreamPort), .parent = TYPE_PCIE_SLOT, + .instance_post_init = cxl_dsp_instance_post_init, .class_init = cxl_dsp_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c index 
7dfd20aa67..8f97697631 100644 --- a/hw/pci-bridge/cxl_root_port.c +++ b/hw/pci-bridge/cxl_root_port.c @@ -107,7 +107,7 @@ static void build_dvsecs(CXLComponentState *cxl) { uint8_t *dvsec; - dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ 0 }; + dvsec = (uint8_t *)&(CXLDVSECPortExt){ 0 }; cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT, EXTENSIONS_PORT_DVSEC_LENGTH, EXTENSIONS_PORT_DVSEC, diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c index a57806fb31..36737189c6 100644 --- a/hw/pci-bridge/cxl_upstream.c +++ b/hw/pci-bridge/cxl_upstream.c @@ -14,6 +14,7 @@ #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "hw/pci/pcie_port.h" +#include "hw/pci-bridge/cxl_upstream_port.h" /* * Null value of all Fs suggested by IEEE RA guidelines for use of * EU, OUI and CID @@ -30,16 +31,6 @@ #define CXL_UPSTREAM_PORT_DVSEC_OFFSET \ (CXL_UPSTREAM_PORT_SN_OFFSET + PCI_EXT_CAP_DSN_SIZEOF) -typedef struct CXLUpstreamPort { - /*< private >*/ - PCIEPort parent_obj; - - /*< public >*/ - CXLComponentState cxl_cstate; - DOECap doe_cdat; - uint64_t sn; -} CXLUpstreamPort; - CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp) { return &usp->cxl_cstate; @@ -116,7 +107,7 @@ static void build_dvsecs(CXLComponentState *cxl) { uint8_t *dvsec; - dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ + dvsec = (uint8_t *)&(CXLDVSECPortExt){ .status = 0x1, /* Port Power Management Init Complete */ }; cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT, diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c index 84e0ca14ac..bd226581af 100644 --- a/hw/pci-host/astro.c +++ b/hw/pci-host/astro.c @@ -19,6 +19,8 @@ #define TYPE_ASTRO_IOMMU_MEMORY_REGION "astro-iommu-memory-region" +#define F_EXTEND(addr) ((addr) | MAKE_64BIT_MASK(32, 32)) + #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/units.h" @@ -386,7 +388,7 @@ static void elroy_set_irq(void *opaque, int irq, int level) uint32_t ena = bit & ~old_ilr; s->ilr = old_ilr | bit; if (ena != 0) { - stl_be_phys(&address_space_memory, cpu_hpa, val & 63); + stl_be_phys(&address_space_memory, F_EXTEND(cpu_hpa), val & 63); } } else { s->ilr = old_ilr & ~bit; @@ -825,15 +827,16 @@ static void astro_realize(DeviceState *obj, Error **errp) /* map elroys mmio */ map_size = LMMIO_DIST_BASE_SIZE / ROPES_PER_IOC; - map_addr = (uint32_t) (LMMIO_DIST_BASE_ADDR + rope * map_size); + map_addr = F_EXTEND(LMMIO_DIST_BASE_ADDR + rope * map_size); memory_region_init_alias(&elroy->pci_mmio_alias, OBJECT(elroy), "pci-mmio-alias", - &elroy->pci_mmio, map_addr, map_size); + &elroy->pci_mmio, (uint32_t) map_addr, map_size); memory_region_add_subregion(get_system_memory(), map_addr, &elroy->pci_mmio_alias); + /* map elroys io */ map_size = IOS_DIST_BASE_SIZE / ROPES_PER_IOC; - map_addr = (uint32_t) (IOS_DIST_BASE_ADDR + rope * map_size); + map_addr = F_EXTEND(IOS_DIST_BASE_ADDR + rope * map_size); memory_region_add_subregion(get_system_memory(), map_addr, &elroy->pci_io); diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index e04114fb3c..384226296b 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -834,6 +834,7 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, IrqLines *irqs, Error **errp) { +#ifdef CONFIG_KVM DeviceState *dev; CPUState *cs; @@ -854,6 +855,9 @@ static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, } return dev; +#else + g_assert_not_reached(); +#endif } static DeviceState *ppce500_init_mpic(PPCE500MachineState *pms, diff --git 
a/hw/s390x/css.c b/hw/s390x/css.c index 95d1b3a3ce..bcedec2fc8 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -644,8 +644,9 @@ void css_conditional_io_interrupt(SubchDev *sch) } } -int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode) +int css_do_sic(S390CPU *cpu, uint8_t isc, uint16_t mode) { + CPUS390XState *env = &cpu->env; S390FLICState *fs = s390_get_flic(); S390FLICStateClass *fsc = s390_get_flic_class(fs); int r; diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index d339cbb7e4..893e71a41b 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -269,9 +269,9 @@ static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code) * service_interrupt call. */ #define SCLP_PV_DUMMY_ADDR 0x4000 -int sclp_service_call_protected(CPUS390XState *env, uint64_t sccb, - uint32_t code) +int sclp_service_call_protected(S390CPU *cpu, uint64_t sccb, uint32_t code) { + CPUS390XState *env = &cpu->env; SCLPDevice *sclp = get_sclp_device(); SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); SCCBHeader header; @@ -296,8 +296,9 @@ out_write: return 0; } -int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code) +int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code) { + CPUS390XState *env = &cpu->env; SCLPDevice *sclp = get_sclp_device(); SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); SCCBHeader header; diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c index be8cafd65f..e53206d959 100644 --- a/hw/sd/aspeed_sdhci.c +++ b/hw/sd/aspeed_sdhci.c @@ -198,16 +198,13 @@ static void aspeed_sdhci_class_init(ObjectClass *classp, void *data) device_class_set_props(dc, aspeed_sdhci_properties); } -static const TypeInfo aspeed_sdhci_info = { - .name = TYPE_ASPEED_SDHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(AspeedSDHCIState), - .class_init = aspeed_sdhci_class_init, +static const TypeInfo aspeed_sdhci_types[] = { + { + .name = TYPE_ASPEED_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSDHCIState), + .class_init = aspeed_sdhci_class_init, + }, }; -static void aspeed_sdhci_register_types(void) -{ - type_register_static(&aspeed_sdhci_info); -} - -type_init(aspeed_sdhci_register_types) +DEFINE_TYPES(aspeed_sdhci_types) diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c index 9431c35914..a600cf39e2 100644 --- a/hw/sd/bcm2835_sdhost.c +++ b/hw/sd/bcm2835_sdhost.c @@ -436,24 +436,19 @@ static void bcm2835_sdhost_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_bcm2835_sdhost; } -static const TypeInfo bcm2835_sdhost_info = { - .name = TYPE_BCM2835_SDHOST, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(BCM2835SDHostState), - .class_init = bcm2835_sdhost_class_init, - .instance_init = bcm2835_sdhost_init, +static const TypeInfo bcm2835_sdhost_types[] = { + { + .name = TYPE_BCM2835_SDHOST, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835SDHostState), + .class_init = bcm2835_sdhost_class_init, + .instance_init = bcm2835_sdhost_init, + }, + { + .name = TYPE_BCM2835_SDHOST_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + }, }; -static const TypeInfo bcm2835_sdhost_bus_info = { - .name = TYPE_BCM2835_SDHOST_BUS, - .parent = TYPE_SD_BUS, - .instance_size = sizeof(SDBus), -}; - -static void bcm2835_sdhost_register_types(void) -{ - type_register_static(&bcm2835_sdhost_info); - type_register_static(&bcm2835_sdhost_bus_info); -} - -type_init(bcm2835_sdhost_register_types) +DEFINE_TYPES(bcm2835_sdhost_types) diff --git a/hw/sd/cadence_sdhci.c b/hw/sd/cadence_sdhci.c index 
75db34befe..ef4e0d74e3 100644 --- a/hw/sd/cadence_sdhci.c +++ b/hw/sd/cadence_sdhci.c @@ -175,17 +175,14 @@ static void cadence_sdhci_class_init(ObjectClass *classp, void *data) dc->vmsd = &vmstate_cadence_sdhci; } -static const TypeInfo cadence_sdhci_info = { - .name = TYPE_CADENCE_SDHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(CadenceSDHCIState), - .instance_init = cadence_sdhci_instance_init, - .class_init = cadence_sdhci_class_init, +static const TypeInfo cadence_sdhci_types[] = { + { + .name = TYPE_CADENCE_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CadenceSDHCIState), + .instance_init = cadence_sdhci_instance_init, + .class_init = cadence_sdhci_class_init, + }, }; -static void cadence_sdhci_register_types(void) -{ - type_register_static(&cadence_sdhci_info); -} - -type_init(cadence_sdhci_register_types) +DEFINE_TYPES(cadence_sdhci_types) diff --git a/hw/sd/core.c b/hw/sd/core.c index 30ee62c510..52d5d90045 100644 --- a/hw/sd/core.c +++ b/hw/sd/core.c @@ -259,16 +259,13 @@ void sdbus_reparent_card(SDBus *from, SDBus *to) sdbus_set_readonly(to, readonly); } -static const TypeInfo sd_bus_info = { - .name = TYPE_SD_BUS, - .parent = TYPE_BUS, - .instance_size = sizeof(SDBus), - .class_size = sizeof(SDBusClass), +static const TypeInfo sd_bus_types[] = { + { + .name = TYPE_SD_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(SDBus), + .class_size = sizeof(SDBusClass), + }, }; -static void sd_bus_register_types(void) -{ - type_register_static(&sd_bus_info); -} - -type_init(sd_bus_register_types) +DEFINE_TYPES(sd_bus_types) diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c index b2f5b4a542..9958680090 100644 --- a/hw/sd/npcm7xx_sdhci.c +++ b/hw/sd/npcm7xx_sdhci.c @@ -166,17 +166,14 @@ static void npcm7xx_sdhci_instance_init(Object *obj) TYPE_SYSBUS_SDHCI); } -static const TypeInfo npcm7xx_sdhci_info = { - .name = TYPE_NPCM7XX_SDHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(NPCM7xxSDHCIState), - .instance_init = npcm7xx_sdhci_instance_init, - .class_init = npcm7xx_sdhci_class_init, +static const TypeInfo npcm7xx_sdhci_types[] = { + { + .name = TYPE_NPCM7XX_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxSDHCIState), + .instance_init = npcm7xx_sdhci_instance_init, + .class_init = npcm7xx_sdhci_class_init, + }, }; -static void npcm7xx_sdhci_register_types(void) -{ - type_register_static(&npcm7xx_sdhci_info); -} - -type_init(npcm7xx_sdhci_register_types) +DEFINE_TYPES(npcm7xx_sdhci_types) diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c index 5e554bd467..2b33814d83 100644 --- a/hw/sd/pl181.c +++ b/hw/sd/pl181.c @@ -519,14 +519,6 @@ static void pl181_class_init(ObjectClass *klass, void *data) k->user_creatable = false; } -static const TypeInfo pl181_info = { - .name = TYPE_PL181, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PL181State), - .instance_init = pl181_init, - .class_init = pl181_class_init, -}; - static void pl181_bus_class_init(ObjectClass *klass, void *data) { SDBusClass *sbc = SD_BUS_CLASS(klass); @@ -535,17 +527,20 @@ static void pl181_bus_class_init(ObjectClass *klass, void *data) sbc->set_readonly = pl181_set_readonly; } -static const TypeInfo pl181_bus_info = { - .name = TYPE_PL181_BUS, - .parent = TYPE_SD_BUS, - .instance_size = sizeof(SDBus), - .class_init = pl181_bus_class_init, +static const TypeInfo pl181_info[] = { + { + .name = TYPE_PL181, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL181State), + .instance_init = pl181_init, + .class_init = 
pl181_class_init, + }, + { + .name = TYPE_PL181_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = pl181_bus_class_init, + }, }; -static void pl181_register_types(void) -{ - type_register_static(&pl181_info); - type_register_static(&pl181_bus_info); -} - -type_init(pl181_register_types) +DEFINE_TYPES(pl181_info) diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c index 4749e935d8..5e8ea69188 100644 --- a/hw/sd/pxa2xx_mmci.c +++ b/hw/sd/pxa2xx_mmci.c @@ -575,25 +575,20 @@ static void pxa2xx_mmci_bus_class_init(ObjectClass *klass, void *data) sbc->set_readonly = pxa2xx_mmci_set_readonly; } -static const TypeInfo pxa2xx_mmci_info = { - .name = TYPE_PXA2XX_MMCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxMMCIState), - .instance_init = pxa2xx_mmci_instance_init, - .class_init = pxa2xx_mmci_class_init, +static const TypeInfo pxa2xx_mmci_types[] = { + { + .name = TYPE_PXA2XX_MMCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxMMCIState), + .instance_init = pxa2xx_mmci_instance_init, + .class_init = pxa2xx_mmci_class_init, + }, + { + .name = TYPE_PXA2XX_MMCI_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = pxa2xx_mmci_bus_class_init, + }, }; -static const TypeInfo pxa2xx_mmci_bus_info = { - .name = TYPE_PXA2XX_MMCI_BUS, - .parent = TYPE_SD_BUS, - .instance_size = sizeof(SDBus), - .class_init = pxa2xx_mmci_bus_class_init, -}; - -static void pxa2xx_mmci_register_types(void) -{ - type_register_static(&pxa2xx_mmci_info); - type_register_static(&pxa2xx_mmci_bus_info); -} - -type_init(pxa2xx_mmci_register_types) +DEFINE_TYPES(pxa2xx_mmci_types) diff --git a/hw/sd/sd.c b/hw/sd/sd.c index 4823befdef..1106ff7d78 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -2278,16 +2278,6 @@ static void sd_class_init(ObjectClass *klass, void *data) sc->proto = &sd_proto_sd; } -static const TypeInfo sd_info = { - .name = TYPE_SD_CARD, - .parent = TYPE_DEVICE, - .instance_size = sizeof(SDState), - .class_size = sizeof(SDCardClass), - .class_init = sd_class_init, - .instance_init = sd_instance_init, - .instance_finalize = sd_instance_finalize, -}; - /* * We do not model the chip select pin, so allow the board to select * whether card should be in SSI or MMC/SD mode. 
It is also up to the @@ -2303,16 +2293,21 @@ static void sd_spi_class_init(ObjectClass *klass, void *data) sc->proto = &sd_proto_spi; } -static const TypeInfo sd_spi_info = { - .name = TYPE_SD_CARD_SPI, - .parent = TYPE_SD_CARD, - .class_init = sd_spi_class_init, +static const TypeInfo sd_types[] = { + { + .name = TYPE_SD_CARD, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SDState), + .class_size = sizeof(SDCardClass), + .class_init = sd_class_init, + .instance_init = sd_instance_init, + .instance_finalize = sd_instance_finalize, + }, + { + .name = TYPE_SD_CARD_SPI, + .parent = TYPE_SD_CARD, + .class_init = sd_spi_class_init, + }, }; -static void sd_register_types(void) -{ - type_register_static(&sd_info); - type_register_static(&sd_spi_info); -} - -type_init(sd_register_types) +DEFINE_TYPES(sd_types) diff --git a/hw/sd/sdhci-pci.c b/hw/sd/sdhci-pci.c index c737c8b930..9b7bee8b3f 100644 --- a/hw/sd/sdhci-pci.c +++ b/hw/sd/sdhci-pci.c @@ -68,20 +68,17 @@ static void sdhci_pci_class_init(ObjectClass *klass, void *data) sdhci_common_class_init(klass, data); } -static const TypeInfo sdhci_pci_info = { - .name = TYPE_PCI_SDHCI, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(SDHCIState), - .class_init = sdhci_pci_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { }, +static const TypeInfo sdhci_pci_types[] = { + { + .name = TYPE_PCI_SDHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(SDHCIState), + .class_init = sdhci_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, }, }; -static void sdhci_pci_register_type(void) -{ - type_register_static(&sdhci_pci_info); -} - -type_init(sdhci_pci_register_type) +DEFINE_TYPES(sdhci_pci_types) diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index 167c03b780..a6cc1ad6c8 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -403,16 +403,13 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data) dc->user_creatable = false; } -static const TypeInfo ssi_sd_info = { - .name = TYPE_SSI_SD, - .parent = TYPE_SSI_PERIPHERAL, - .instance_size = sizeof(ssi_sd_state), - .class_init = ssi_sd_class_init, +static const TypeInfo ssi_sd_types[] = { + { + .name = TYPE_SSI_SD, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(ssi_sd_state), + .class_init = ssi_sd_class_init, + }, }; -static void ssi_sd_register_types(void) -{ - type_register_static(&ssi_sd_info); -} - -type_init(ssi_sd_register_types) +DEFINE_TYPES(ssi_sd_types) diff --git a/hw/sensor/Kconfig b/hw/sensor/Kconfig index e03bd09b50..bc6331b4ab 100644 --- a/hw/sensor/Kconfig +++ b/hw/sensor/Kconfig @@ -22,6 +22,11 @@ config ADM1272 bool depends on I2C +config ADM1266 + bool + depends on PMBUS + default y if PMBUS + config MAX34451 bool depends on I2C diff --git a/hw/sensor/adm1266.c b/hw/sensor/adm1266.c new file mode 100644 index 0000000000..5ae4f82ba1 --- /dev/null +++ b/hw/sensor/adm1266.c @@ -0,0 +1,254 @@ +/* + * Analog Devices ADM1266 Cascadable Super Sequencer with Margin Control and + * Fault Recording with PMBus + * + * https://www.analog.com/media/en/technical-documentation/data-sheets/adm1266.pdf + * + * Copyright 2023 Google LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/i2c/pmbus_device.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define TYPE_ADM1266 "adm1266" +OBJECT_DECLARE_SIMPLE_TYPE(ADM1266State, 
ADM1266) + +#define ADM1266_BLACKBOX_CONFIG 0xD3 +#define ADM1266_PDIO_CONFIG 0xD4 +#define ADM1266_READ_STATE 0xD9 +#define ADM1266_READ_BLACKBOX 0xDE +#define ADM1266_SET_RTC 0xDF +#define ADM1266_GPIO_SYNC_CONFIGURATION 0xE1 +#define ADM1266_BLACKBOX_INFORMATION 0xE6 +#define ADM1266_PDIO_STATUS 0xE9 +#define ADM1266_GPIO_STATUS 0xEA + +/* Defaults */ +#define ADM1266_OPERATION_DEFAULT 0x80 +#define ADM1266_CAPABILITY_DEFAULT 0xA0 +#define ADM1266_CAPABILITY_NO_PEC 0x20 +#define ADM1266_PMBUS_REVISION_DEFAULT 0x22 +#define ADM1266_MFR_ID_DEFAULT "ADI" +#define ADM1266_MFR_ID_DEFAULT_LEN 32 +#define ADM1266_MFR_MODEL_DEFAULT "ADM1266-A1" +#define ADM1266_MFR_MODEL_DEFAULT_LEN 32 +#define ADM1266_MFR_REVISION_DEFAULT "25" +#define ADM1266_MFR_REVISION_DEFAULT_LEN 8 + +#define ADM1266_NUM_PAGES 17 +/** + * PAGE Index + * Page 0 VH1. + * Page 1 VH2. + * Page 2 VH3. + * Page 3 VH4. + * Page 4 VP1. + * Page 5 VP2. + * Page 6 VP3. + * Page 7 VP4. + * Page 8 VP5. + * Page 9 VP6. + * Page 10 VP7. + * Page 11 VP8. + * Page 12 VP9. + * Page 13 VP10. + * Page 14 VP11. + * Page 15 VP12. + * Page 16 VP13. + */ +typedef struct ADM1266State { + PMBusDevice parent; + + char mfr_id[32]; + char mfr_model[32]; + char mfr_rev[8]; +} ADM1266State; + +static const uint8_t adm1266_ic_device_id[] = {0x03, 0x41, 0x12, 0x66}; +static const uint8_t adm1266_ic_device_rev[] = {0x08, 0x01, 0x08, 0x07, 0x0, + 0x0, 0x07, 0x41, 0x30}; + +static void adm1266_exit_reset(Object *obj) +{ + ADM1266State *s = ADM1266(obj); + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + + pmdev->page = 0; + pmdev->capability = ADM1266_CAPABILITY_NO_PEC; + + for (int i = 0; i < ADM1266_NUM_PAGES; i++) { + pmdev->pages[i].operation = ADM1266_OPERATION_DEFAULT; + pmdev->pages[i].revision = ADM1266_PMBUS_REVISION_DEFAULT; + pmdev->pages[i].vout_mode = 0; + pmdev->pages[i].read_vout = pmbus_data2linear_mode(12, 0); + pmdev->pages[i].vout_margin_high = pmbus_data2linear_mode(15, 0); + pmdev->pages[i].vout_margin_low = pmbus_data2linear_mode(3, 0); + pmdev->pages[i].vout_ov_fault_limit = pmbus_data2linear_mode(16, 0); + pmdev->pages[i].revision = ADM1266_PMBUS_REVISION_DEFAULT; + } + + strncpy(s->mfr_id, ADM1266_MFR_ID_DEFAULT, 4); + strncpy(s->mfr_model, ADM1266_MFR_MODEL_DEFAULT, 11); + strncpy(s->mfr_rev, ADM1266_MFR_REVISION_DEFAULT, 3); +} + +static uint8_t adm1266_read_byte(PMBusDevice *pmdev) +{ + ADM1266State *s = ADM1266(pmdev); + + switch (pmdev->code) { + case PMBUS_MFR_ID: /* R/W block */ + pmbus_send_string(pmdev, s->mfr_id); + break; + + case PMBUS_MFR_MODEL: /* R/W block */ + pmbus_send_string(pmdev, s->mfr_model); + break; + + case PMBUS_MFR_REVISION: /* R/W block */ + pmbus_send_string(pmdev, s->mfr_rev); + break; + + case PMBUS_IC_DEVICE_ID: + pmbus_send(pmdev, adm1266_ic_device_id, sizeof(adm1266_ic_device_id)); + break; + + case PMBUS_IC_DEVICE_REV: + pmbus_send(pmdev, adm1266_ic_device_rev, sizeof(adm1266_ic_device_rev)); + break; + + default: + qemu_log_mask(LOG_UNIMP, + "%s: reading from unimplemented register: 0x%02x\n", + __func__, pmdev->code); + return 0xFF; + } + + return 0; +} + +static int adm1266_write_data(PMBusDevice *pmdev, const uint8_t *buf, + uint8_t len) +{ + ADM1266State *s = ADM1266(pmdev); + + switch (pmdev->code) { + case PMBUS_MFR_ID: /* R/W block */ + pmbus_receive_block(pmdev, (uint8_t *)s->mfr_id, sizeof(s->mfr_id)); + break; + + case PMBUS_MFR_MODEL: /* R/W block */ + pmbus_receive_block(pmdev, (uint8_t *)s->mfr_model, + sizeof(s->mfr_model)); + break; + + case PMBUS_MFR_REVISION: /* R/W block*/ + 
pmbus_receive_block(pmdev, (uint8_t *)s->mfr_rev, sizeof(s->mfr_rev)); + break; + + case ADM1266_SET_RTC: /* do nothing */ + break; + + default: + qemu_log_mask(LOG_UNIMP, + "%s: writing to unimplemented register: 0x%02x\n", + __func__, pmdev->code); + break; + } + return 0; +} + +static void adm1266_get(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + uint16_t value; + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + PMBusVoutMode *mode = (PMBusVoutMode *)&pmdev->pages[0].vout_mode; + + if (strcmp(name, "vout") == 0) { + value = pmbus_linear_mode2data(*(uint16_t *)opaque, mode->exp); + } else { + value = *(uint16_t *)opaque; + } + + visit_type_uint16(v, name, &value, errp); +} + +static void adm1266_set(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + uint16_t *internal = opaque; + uint16_t value; + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + PMBusVoutMode *mode = (PMBusVoutMode *)&pmdev->pages[0].vout_mode; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + *internal = pmbus_data2linear_mode(value, mode->exp); + pmbus_check_limits(pmdev); +} + +static const VMStateDescription vmstate_adm1266 = { + .name = "ADM1266", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]){ + VMSTATE_PMBUS_DEVICE(parent, ADM1266State), + VMSTATE_END_OF_LIST() + } +}; + +static void adm1266_init(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + uint64_t flags = PB_HAS_VOUT_MODE | PB_HAS_VOUT | PB_HAS_VOUT_MARGIN | + PB_HAS_VOUT_RATING | PB_HAS_STATUS_MFR_SPECIFIC; + + for (int i = 0; i < ADM1266_NUM_PAGES; i++) { + pmbus_page_config(pmdev, i, flags); + + object_property_add(obj, "vout[*]", "uint16", + adm1266_get, + adm1266_set, NULL, &pmdev->pages[i].read_vout); + } +} + +static void adm1266_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PMBusDeviceClass *k = PMBUS_DEVICE_CLASS(klass); + + dc->desc = "Analog Devices ADM1266 Hot Swap controller"; + dc->vmsd = &vmstate_adm1266; + k->write_data = adm1266_write_data; + k->receive_byte = adm1266_read_byte; + k->device_num_pages = 17; + + rc->phases.exit = adm1266_exit_reset; +} + +static const TypeInfo adm1266_info = { + .name = TYPE_ADM1266, + .parent = TYPE_PMBUS_DEVICE, + .instance_size = sizeof(ADM1266State), + .instance_init = adm1266_init, + .class_init = adm1266_class_init, +}; + +static void adm1266_register_types(void) +{ + type_register_static(&adm1266_info); +} + +type_init(adm1266_register_types) diff --git a/hw/sensor/meson.build b/hw/sensor/meson.build index 30e20e27b8..420fdc3359 100644 --- a/hw/sensor/meson.build +++ b/hw/sensor/meson.build @@ -2,6 +2,7 @@ system_ss.add(when: 'CONFIG_TMP105', if_true: files('tmp105.c')) system_ss.add(when: 'CONFIG_TMP421', if_true: files('tmp421.c')) system_ss.add(when: 'CONFIG_DPS310', if_true: files('dps310.c')) system_ss.add(when: 'CONFIG_EMC141X', if_true: files('emc141x.c')) +system_ss.add(when: 'CONFIG_ADM1266', if_true: files('adm1266.c')) system_ss.add(when: 'CONFIG_ADM1272', if_true: files('adm1272.c')) system_ss.add(when: 'CONFIG_MAX34451', if_true: files('max34451.c')) system_ss.add(when: 'CONFIG_LSM303DLHC_MAG', if_true: files('lsm303dlhc_mag.c')) diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c index 49d699ffc2..eb91723855 100644 --- a/hw/virtio/vhost-user-fs.c +++ b/hw/virtio/vhost-user-fs.c @@ -298,9 +298,108 @@ static struct vhost_dev *vuf_get_vhost(VirtIODevice *vdev) return &fs->vhost_dev; } 
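
Before the vuf_save_state()/vuf_load_state() pair that follows: the back-end state they carry is embedded in the migration stream as a sequence of chunks, each prefixed with a big-endian 32-bit length (qemu_put_be32() in vhost_save_backend_state(), added to vhost.c further down), terminated by a zero-length chunk. A standalone reader for that framing, using plain stdio in place of QEMUFile (names and buffer policy are illustrative, not QEMU's):

#include <arpa/inet.h>  /* ntohl() */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Consume one back-end state blob framed as [be32 len][payload]...[be32 0]. */
static int read_backend_state(FILE *f)
{
    for (;;) {
        uint32_t be_len;
        if (fread(&be_len, sizeof(be_len), 1, f) != 1) {
            return -1;                      /* truncated stream */
        }
        uint32_t len = ntohl(be_len);
        if (len == 0) {
            return 0;                       /* zero length marks EOF */
        }
        uint8_t *buf = malloc(len);
        if (!buf || fread(buf, 1, len, f) != len) {
            free(buf);
            return -1;
        }
        /* ...forward buf to the destination back-end's pipe... */
        free(buf);
    }
}

vhost_load_backend_state() mirrors this by construction of the format: it reads the same framing back out of the migration stream and writes each payload into the pipe handed to the back-end.
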
+/** + * Fetch the internal state from virtiofsd and save it to `f`. + */ +static int vuf_save_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + VirtIODevice *vdev = pv; + VHostUserFS *fs = VHOST_USER_FS(vdev); + Error *local_error = NULL; + int ret; + + ret = vhost_save_backend_state(&fs->vhost_dev, f, &local_error); + if (ret < 0) { + error_reportf_err(local_error, + "Error saving back-end state of %s device %s " + "(tag: \"%s\"): ", + vdev->name, vdev->parent_obj.canonical_path, + fs->conf.tag ?: "<none>"); + return ret; + } + + return 0; +} + +/** + * Load virtiofsd's internal state from `f` and send it over to virtiofsd. + */ +static int vuf_load_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + VirtIODevice *vdev = pv; + VHostUserFS *fs = VHOST_USER_FS(vdev); + Error *local_error = NULL; + int ret; + + ret = vhost_load_backend_state(&fs->vhost_dev, f, &local_error); + if (ret < 0) { + error_reportf_err(local_error, + "Error loading back-end state of %s device %s " + "(tag: \"%s\"): ", + vdev->name, vdev->parent_obj.canonical_path, + fs->conf.tag ?: "<none>"); + return ret; + } + + return 0; +} + +static bool vuf_is_internal_migration(void *opaque) +{ + /* TODO: Return false when an external migration is requested */ + return true; +} + +static int vuf_check_migration_support(void *opaque) +{ + VirtIODevice *vdev = opaque; + VHostUserFS *fs = VHOST_USER_FS(vdev); + + if (!vhost_supports_device_state(&fs->vhost_dev)) { + error_report("Back-end of %s device %s (tag: \"%s\") does not support " + "migration through qemu", + vdev->name, vdev->parent_obj.canonical_path, + fs->conf.tag ?: "<none>"); + return -ENOTSUP; + } + + return 0; +} + +static const VMStateDescription vuf_backend_vmstate; + static const VMStateDescription vuf_vmstate = { .name = "vhost-user-fs", - .unmigratable = 1, + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vuf_backend_vmstate, + NULL, + } +}; + +static const VMStateDescription vuf_backend_vmstate = { + .name = "vhost-user-fs-backend", + .version_id = 0, + .needed = vuf_is_internal_migration, + .pre_load = vuf_check_migration_support, + .pre_save = vuf_check_migration_support, + .fields = (VMStateField[]) { + { + .name = "back-end", + .info = &(const VMStateInfo) { + .name = "virtio-fs back-end state", + .get = vuf_load_state, + .put = vuf_save_state, + }, + }, + VMSTATE_END_OF_LIST() + }, }; static Property vuf_properties[] = { diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 7b42ae8aae..f214df804b 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -103,6 +103,8 @@ typedef enum VhostUserRequest { VHOST_USER_SET_STATUS = 39, VHOST_USER_GET_STATUS = 40, VHOST_USER_GET_SHARED_OBJECT = 41, + VHOST_USER_SET_DEVICE_STATE_FD = 42, + VHOST_USER_CHECK_DEVICE_STATE = 43, VHOST_USER_MAX } VhostUserRequest; @@ -201,6 +203,12 @@ typedef struct { uint32_t size; /* the following payload size */ } QEMU_PACKED VhostUserHeader; +/* Request payload of VHOST_USER_SET_DEVICE_STATE_FD */ +typedef struct VhostUserTransferDeviceState { + uint32_t direction; + uint32_t phase; +} VhostUserTransferDeviceState; + typedef union { #define VHOST_USER_VRING_IDX_MASK (0xff) #define VHOST_USER_VRING_NOFD_MASK (0x1 << 8) @@ -216,6 +224,7 @@ typedef union { VhostUserVringArea area; VhostUserInflight inflight; VhostUserShared object; + VhostUserTransferDeviceState transfer_state; } 
VhostUserPayload; typedef struct VhostUserMsg { @@ -2855,6 +2864,140 @@ static void vhost_user_reset_status(struct vhost_dev *dev) } } +static bool vhost_user_supports_device_state(struct vhost_dev *dev) +{ + return virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_DEVICE_STATE); +} + +static int vhost_user_set_device_state_fd(struct vhost_dev *dev, + VhostDeviceStateDirection direction, + VhostDeviceStatePhase phase, + int fd, + int *reply_fd, + Error **errp) +{ + int ret; + struct vhost_user *vu = dev->opaque; + VhostUserMsg msg = { + .hdr = { + .request = VHOST_USER_SET_DEVICE_STATE_FD, + .flags = VHOST_USER_VERSION, + .size = sizeof(msg.payload.transfer_state), + }, + .payload.transfer_state = { + .direction = direction, + .phase = phase, + }, + }; + + *reply_fd = -1; + + if (!vhost_user_supports_device_state(dev)) { + close(fd); + error_setg(errp, "Back-end does not support migration state transfer"); + return -ENOTSUP; + } + + ret = vhost_user_write(dev, &msg, &fd, 1); + close(fd); + if (ret < 0) { + error_setg_errno(errp, -ret, + "Failed to send SET_DEVICE_STATE_FD message"); + return ret; + } + + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + error_setg_errno(errp, -ret, + "Failed to receive SET_DEVICE_STATE_FD reply"); + return ret; + } + + if (msg.hdr.request != VHOST_USER_SET_DEVICE_STATE_FD) { + error_setg(errp, + "Received unexpected message type, expected %d, received %d", + VHOST_USER_SET_DEVICE_STATE_FD, msg.hdr.request); + return -EPROTO; + } + + if (msg.hdr.size != sizeof(msg.payload.u64)) { + error_setg(errp, + "Received bad message size, expected %zu, received %" PRIu32, + sizeof(msg.payload.u64), msg.hdr.size); + return -EPROTO; + } + + if ((msg.payload.u64 & 0xff) != 0) { + error_setg(errp, "Back-end did not accept migration state transfer"); + return -EIO; + } + + if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) { + *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr); + if (*reply_fd < 0) { + error_setg(errp, + "Failed to get back-end-provided transfer pipe FD"); + *reply_fd = -1; + return -EIO; + } + } + + return 0; +} + +static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp) +{ + int ret; + VhostUserMsg msg = { + .hdr = { + .request = VHOST_USER_CHECK_DEVICE_STATE, + .flags = VHOST_USER_VERSION, + .size = 0, + }, + }; + + if (!vhost_user_supports_device_state(dev)) { + error_setg(errp, "Back-end does not support migration state transfer"); + return -ENOTSUP; + } + + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + error_setg_errno(errp, -ret, + "Failed to send CHECK_DEVICE_STATE message"); + return ret; + } + + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + error_setg_errno(errp, -ret, + "Failed to receive CHECK_DEVICE_STATE reply"); + return ret; + } + + if (msg.hdr.request != VHOST_USER_CHECK_DEVICE_STATE) { + error_setg(errp, + "Received unexpected message type, expected %d, received %d", + VHOST_USER_CHECK_DEVICE_STATE, msg.hdr.request); + return -EPROTO; + } + + if (msg.hdr.size != sizeof(msg.payload.u64)) { + error_setg(errp, + "Received bad message size, expected %zu, received %" PRIu32, + sizeof(msg.payload.u64), msg.hdr.size); + return -EPROTO; + } + + if (msg.payload.u64 != 0) { + error_setg(errp, "Back-end failed to process its internal state"); + return -EIO; + } + + return 0; +} + const VhostOps user_ops = { .backend_type = VHOST_BACKEND_TYPE_USER, .vhost_backend_init = vhost_user_backend_init, @@ -2890,4 +3033,7 @@ const VhostOps user_ops = { .vhost_set_inflight_fd = 
vhost_user_set_inflight_fd, .vhost_dev_start = vhost_user_dev_start, .vhost_reset_status = vhost_user_reset_status, + .vhost_supports_device_state = vhost_user_supports_device_state, + .vhost_set_device_state_fd = vhost_user_set_device_state_fd, + .vhost_check_device_state = vhost_user_check_device_state, }; diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 9c9ae7109e..2c9ac79468 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -2159,3 +2159,244 @@ int vhost_reset_device(struct vhost_dev *hdev) return -ENOSYS; } + +bool vhost_supports_device_state(struct vhost_dev *dev) +{ + if (dev->vhost_ops->vhost_supports_device_state) { + return dev->vhost_ops->vhost_supports_device_state(dev); + } + + return false; +} + +int vhost_set_device_state_fd(struct vhost_dev *dev, + VhostDeviceStateDirection direction, + VhostDeviceStatePhase phase, + int fd, + int *reply_fd, + Error **errp) +{ + if (dev->vhost_ops->vhost_set_device_state_fd) { + return dev->vhost_ops->vhost_set_device_state_fd(dev, direction, phase, + fd, reply_fd, errp); + } + + error_setg(errp, + "vhost transport does not support migration state transfer"); + return -ENOSYS; +} + +int vhost_check_device_state(struct vhost_dev *dev, Error **errp) +{ + if (dev->vhost_ops->vhost_check_device_state) { + return dev->vhost_ops->vhost_check_device_state(dev, errp); + } + + error_setg(errp, + "vhost transport does not support migration state transfer"); + return -ENOSYS; +} + +int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp) +{ + /* Maximum chunk size in which to transfer the state */ + const size_t chunk_size = 1 * 1024 * 1024; + g_autofree void *transfer_buf = NULL; + g_autoptr(GError) g_err = NULL; + int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1; + int ret; + + /* [0] for reading (our end), [1] for writing (back-end's end) */ + if (!g_unix_open_pipe(pipe_fds, FD_CLOEXEC, &g_err)) { + error_setg(errp, "Failed to set up state transfer pipe: %s", + g_err->message); + ret = -EINVAL; + goto fail; + } + + read_fd = pipe_fds[0]; + write_fd = pipe_fds[1]; + + /* + * VHOST_TRANSFER_STATE_PHASE_STOPPED means the device must be stopped. + * Ideally, it is suspended, but SUSPEND/RESUME currently do not exist for + * vhost-user, so just check that it is stopped at all. 
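The chunked stream format that vhost_save_backend_state() produces (its body continues below) is simple to state on its own: every chunk is prefixed with its length as a big-endian 32-bit word, and a zero-length chunk terminates the state. A minimal standalone sketch of the writer side, using POSIX write() on a plain file descriptor where the real code uses QEMUFile's qemu_put_be32()/qemu_put_buffer(); write_all() and put_chunk() are illustrative helpers, not part of this patch:

    #include <arpa/inet.h>   /* htonl() */
    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Write the whole buffer, retrying on short writes and EINTR. */
    static int write_all(int fd, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        while (len > 0) {
            ssize_t n = write(fd, p, len);
            if (n < 0) {
                if (errno == EINTR) {
                    continue;
                }
                return -errno;
            }
            p += n;
            len -= n;
        }
        return 0;
    }

    /* Emit one length-prefixed chunk; len == 0 marks end-of-state. */
    static int put_chunk(int stream_fd, const void *buf, uint32_t len)
    {
        uint32_t be_len = htonl(len); /* big-endian, like qemu_put_be32() */
        int ret = write_all(stream_fd, &be_len, sizeof(be_len));

        if (ret == 0 && len > 0) {
            ret = write_all(stream_fd, buf, len);
        }
        return ret;
    }

A reader only has to mirror this framing: fetch a be32 length, stop on zero, otherwise consume that many bytes; that is exactly the loop vhost_load_backend_state() below implements on top of QEMUFile.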
+ */ + assert(!dev->started); + + /* Transfer ownership of write_fd to the back-end */ + ret = vhost_set_device_state_fd(dev, + VHOST_TRANSFER_STATE_DIRECTION_SAVE, + VHOST_TRANSFER_STATE_PHASE_STOPPED, + write_fd, + &reply_fd, + errp); + if (ret < 0) { + error_prepend(errp, "Failed to initiate state transfer: "); + goto fail; + } + + /* If the back-end wishes to use a different pipe, switch over */ + if (reply_fd >= 0) { + close(read_fd); + read_fd = reply_fd; + } + + transfer_buf = g_malloc(chunk_size); + + while (true) { + ssize_t read_ret; + + read_ret = RETRY_ON_EINTR(read(read_fd, transfer_buf, chunk_size)); + if (read_ret < 0) { + ret = -errno; + error_setg_errno(errp, -ret, "Failed to receive state"); + goto fail; + } + + assert(read_ret <= chunk_size); + qemu_put_be32(f, read_ret); + + if (read_ret == 0) { + /* EOF */ + break; + } + + qemu_put_buffer(f, transfer_buf, read_ret); + } + + /* + * Back-end will not really care, but be clean and close our end of the pipe + * before inquiring the back-end about whether transfer was successful + */ + close(read_fd); + read_fd = -1; + + /* Also, verify that the device is still stopped */ + assert(!dev->started); + + ret = vhost_check_device_state(dev, errp); + if (ret < 0) { + goto fail; + } + + ret = 0; +fail: + if (read_fd >= 0) { + close(read_fd); + } + + return ret; +} + +int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp) +{ + size_t transfer_buf_size = 0; + g_autofree void *transfer_buf = NULL; + g_autoptr(GError) g_err = NULL; + int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1; + int ret; + + /* [0] for reading (back-end's end), [1] for writing (our end) */ + if (!g_unix_open_pipe(pipe_fds, FD_CLOEXEC, &g_err)) { + error_setg(errp, "Failed to set up state transfer pipe: %s", + g_err->message); + ret = -EINVAL; + goto fail; + } + + read_fd = pipe_fds[0]; + write_fd = pipe_fds[1]; + + /* + * VHOST_TRANSFER_STATE_PHASE_STOPPED means the device must be stopped. + * Ideally, it is suspended, but SUSPEND/RESUME currently do not exist for + * vhost-user, so just check that it is stopped at all. 
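For the other half of the protocol, a vhost-user back-end that receives the pipe FD from SET_DEVICE_STATE_FD in the SAVE direction simply streams its serialized state into that FD and closes it; the resulting EOF is the end-of-transfer signal. A hypothetical back-end helper (backend_save_state() is illustrative, not part of this patch; error reporting is deferred to the later CHECK_DEVICE_STATE request):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>

    static int backend_save_state(int transfer_fd, const uint8_t *state,
                                  size_t len)
    {
        int ret = 0;

        while (len > 0) {
            ssize_t n = write(transfer_fd, state, len);

            if (n < 0) {
                if (errno == EINTR) {
                    continue;
                }
                ret = -errno;
                break;
            }
            state += n;
            len -= n;
        }

        /* Closing our end produces the EOF the front-end waits for. */
        close(transfer_fd);
        return ret;
    }

Whatever status backend_save_state() records would then be reported back when the front-end issues VHOST_USER_CHECK_DEVICE_STATE.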
+ */ + assert(!dev->started); + + /* Transfer ownership of read_fd to the back-end */ + ret = vhost_set_device_state_fd(dev, + VHOST_TRANSFER_STATE_DIRECTION_LOAD, + VHOST_TRANSFER_STATE_PHASE_STOPPED, + read_fd, + &reply_fd, + errp); + if (ret < 0) { + error_prepend(errp, "Failed to initiate state transfer: "); + goto fail; + } + + /* If the back-end wishes to use a different pipe, switch over */ + if (reply_fd >= 0) { + close(write_fd); + write_fd = reply_fd; + } + + while (true) { + size_t this_chunk_size = qemu_get_be32(f); + ssize_t write_ret; + const uint8_t *transfer_pointer; + + if (this_chunk_size == 0) { + /* End of state */ + break; + } + + if (transfer_buf_size < this_chunk_size) { + transfer_buf = g_realloc(transfer_buf, this_chunk_size); + transfer_buf_size = this_chunk_size; + } + + if (qemu_get_buffer(f, transfer_buf, this_chunk_size) < + this_chunk_size) + { + error_setg(errp, "Failed to read state"); + ret = -EINVAL; + goto fail; + } + + transfer_pointer = transfer_buf; + while (this_chunk_size > 0) { + write_ret = RETRY_ON_EINTR( + write(write_fd, transfer_pointer, this_chunk_size) + ); + if (write_ret < 0) { + ret = -errno; + error_setg_errno(errp, -ret, "Failed to send state"); + goto fail; + } else if (write_ret == 0) { + error_setg(errp, "Failed to send state: Connection is closed"); + ret = -ECONNRESET; + goto fail; + } + + assert(write_ret <= this_chunk_size); + this_chunk_size -= write_ret; + transfer_pointer += write_ret; + } + } + + /* + * Close our end, thus ending transfer, before inquiring the back-end about + * whether transfer was successful + */ + close(write_fd); + write_fd = -1; + + /* Also, verify that the device is still stopped */ + assert(!dev->started); + + ret = vhost_check_device_state(dev, errp); + if (ret < 0) { + goto fail; + } + + ret = 0; +fail: + if (write_fd >= 0) { + close(write_fd); + } + + return ret; +} diff --git a/hw/xen/xen-backend.c b/hw/xen/xen-backend.c index 5b0fb76eae..b9bf70a9f5 100644 --- a/hw/xen/xen-backend.c +++ b/hw/xen/xen-backend.c @@ -101,6 +101,24 @@ static XenBackendInstance *xen_backend_list_find(XenDevice *xendev) return NULL; } +bool xen_backend_exists(const char *type, const char *name) +{ + const XenBackendImpl *impl = xen_backend_table_lookup(type); + XenBackendInstance *backend; + + if (!impl) { + return false; + } + + QLIST_FOREACH(backend, &backend_list, entry) { + if (backend->impl == impl && !strcmp(backend->name, name)) { + return true; + } + } + + return false; +} + static void xen_backend_list_remove(XenBackendInstance *backend) { QLIST_REMOVE(backend, entry); @@ -122,11 +140,6 @@ void xen_backend_device_create(XenBus *xenbus, const char *type, backend->name = g_strdup(name); impl->create(backend, opts, errp); - if (*errp) { - g_free(backend->name); - g_free(backend); - return; - } backend->impl = impl; xen_backend_list_add(backend); @@ -165,7 +178,9 @@ bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp) } impl = backend->impl; - impl->destroy(backend, errp); + if (backend->xendev) { + impl->destroy(backend, errp); + } xen_backend_list_remove(backend); g_free(backend->name); diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c index ece8ec40cd..4973e7d9c9 100644 --- a/hw/xen/xen-bus.c +++ b/hw/xen/xen-bus.c @@ -209,7 +209,8 @@ static void xen_bus_type_enumerate(XenBus *xenbus, const char *type) NULL, "%u", &online) != 1) online = 0; - if (online && state == XenbusStateInitialising) { + if (online && state == XenbusStateInitialising && + !xen_backend_exists(type, backend[i])) { Error 
*local_err = NULL; xen_bus_backend_create(xenbus, type, backend[i], backend_path, @@ -711,8 +712,17 @@ static void xen_device_frontend_create(XenDevice *xendev, Error **errp) { ERRP_GUARD(); XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev); - xendev->frontend_path = xen_device_get_frontend_path(xendev); + if (xendev_class->get_frontend_path) { + xendev->frontend_path = xendev_class->get_frontend_path(xendev, errp); + if (!xendev->frontend_path) { + error_prepend(errp, "failed to create frontend: "); + return; + } + } else { + xendev->frontend_path = xen_device_get_frontend_path(xendev); + } /* * The frontend area may have already been created by a legacy @@ -912,6 +922,11 @@ void xen_device_notify_event_channel(XenDevice *xendev, } } +unsigned int xen_event_channel_get_local_port(XenEventChannel *channel) +{ + return channel->local_port; +} + void xen_device_unbind_event_channel(XenDevice *xendev, XenEventChannel *channel, Error **errp) @@ -1118,11 +1133,13 @@ static void xen_register_types(void) type_init(xen_register_types) -void xen_bus_init(void) +BusState *xen_bus_init(void) { DeviceState *dev = qdev_new(TYPE_XEN_BRIDGE); BusState *bus = qbus_new(TYPE_XEN_BUS, dev, NULL); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); qbus_set_bus_hotplug_handler(bus); + + return bus; } diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c index 4ded3cec23..124dd5f3d6 100644 --- a/hw/xen/xen-legacy-backend.c +++ b/hw/xen/xen-legacy-backend.c @@ -623,7 +623,6 @@ void xen_be_init(void) xen_set_dynamic_sysbus(); - xen_be_register("console", &xen_console_ops); xen_be_register("vkbd", &xen_kbdmouse_ops); #ifdef CONFIG_VIRTFS xen_be_register("9pfs", &xen_9pfs_ops); diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c index 9b7304e544..3f77c675c6 100644 --- a/hw/xen/xen_devconfig.c +++ b/hw/xen/xen_devconfig.c @@ -46,34 +46,6 @@ static int xen_config_dev_all(char *fe, char *be) /* ------------------------------------------------------------- */ -int xen_config_dev_blk(DriveInfo *disk) -{ - char fe[256], be[256], device_name[32]; - int vdev = 202 * 256 + 16 * disk->unit; - int cdrom = disk->media_cd; - const char *devtype = cdrom ? "cdrom" : "disk"; - const char *mode = cdrom ? 
"r" : "w"; - const char *filename = qemu_opt_get(disk->opts, "file"); - - snprintf(device_name, sizeof(device_name), "xvd%c", 'a' + disk->unit); - xen_pv_printf(NULL, 1, "config disk %d [%s]: %s\n", - disk->unit, device_name, filename); - xen_config_dev_dirs("vbd", "qdisk", vdev, fe, be, sizeof(fe)); - - /* frontend */ - xenstore_write_int(fe, "virtual-device", vdev); - xenstore_write_str(fe, "device-type", devtype); - - /* backend */ - xenstore_write_str(be, "dev", device_name); - xenstore_write_str(be, "type", "file"); - xenstore_write_str(be, "params", filename); - xenstore_write_str(be, "mode", mode); - - /* common stuff */ - return xen_config_dev_all(fe, be); -} - int xen_config_dev_nic(NICInfo *nic) { char fe[256], be[256]; diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c index 17cda5ec13..9f9f137f99 100644 --- a/hw/xenpv/xen_machine_pv.c +++ b/hw/xenpv/xen_machine_pv.c @@ -32,7 +32,6 @@ static void xen_init_pv(MachineState *machine) { - DriveInfo *dinfo; int i; setup_xen_backend_ops(); @@ -55,7 +54,6 @@ static void xen_init_pv(MachineState *machine) } xen_be_register("vfb", &xen_framebuffer_ops); - xen_be_register("qnic", &xen_netdev_ops); /* configure framebuffer */ if (vga_interface_type == VGA_XENFB) { @@ -64,14 +62,6 @@ static void xen_init_pv(MachineState *machine) vga_interface_created = true; } - /* configure disks */ - for (i = 0; i < 16; i++) { - dinfo = drive_get(IF_XEN, 0, i); - if (!dinfo) - continue; - xen_config_dev_blk(dinfo); - } - /* configure nics */ for (i = 0; i < nb_nics; i++) { if (!nd_table[i].model || 0 != strcmp(nd_table[i].model, "xen")) diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 30c376a4de..41115d8919 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -45,9 +45,6 @@ void cpu_list_lock(void); void cpu_list_unlock(void); unsigned int cpu_list_generation_id_get(void); -void tcg_flush_softmmu_tlb(CPUState *cs); -void tcg_flush_jmp_cache(CPUState *cs); - void tcg_iommu_init_notifier_list(CPUState *cpu); void tcg_iommu_free_notifier_list(CPUState *cpu); diff --git a/include/exec/tb-flush.h b/include/exec/tb-flush.h index d92d06565b..142c240d94 100644 --- a/include/exec/tb-flush.h +++ b/include/exec/tb-flush.h @@ -23,4 +23,6 @@ */ void tb_flush(CPUState *cs); +void tcg_flush_jmp_cache(CPUState *cs); + #endif /* _TB_FLUSH_H_ */ diff --git a/include/hw/audio/virtio-snd.h b/include/hw/audio/virtio-snd.h new file mode 100644 index 0000000000..c3767f442b --- /dev/null +++ b/include/hw/audio/virtio-snd.h @@ -0,0 +1,235 @@ +/* + * VIRTIO Sound Device conforming to + * + * "Virtual I/O Device (VIRTIO) Version 1.2 + * Committee Specification Draft 01 + * 09 May 2022" + * + * Copyright (c) 2023 Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org> + * Copyright (C) 2019 OpenSynergy GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#ifndef QEMU_VIRTIO_SOUND_H +#define QEMU_VIRTIO_SOUND_H + +#include "hw/virtio/virtio.h" +#include "audio/audio.h" +#include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/virtio_snd.h" + +#define TYPE_VIRTIO_SND "virtio-sound-device" +#define VIRTIO_SND(obj) \ + OBJECT_CHECK(VirtIOSound, (obj), TYPE_VIRTIO_SND) + +/* CONFIGURATION SPACE */ + +typedef struct virtio_snd_config virtio_snd_config; + +/* COMMON DEFINITIONS */ + +/* common header for request/response */ +typedef struct virtio_snd_hdr virtio_snd_hdr; + +/* event notification */ +typedef struct virtio_snd_event virtio_snd_event; + +/* common control request to query information about an item */ +typedef struct virtio_snd_query_info virtio_snd_query_info; + +/* JACK CONTROL MESSAGES */ + +typedef struct virtio_snd_jack_hdr virtio_snd_jack_hdr; + +/* jack information structure */ +typedef struct virtio_snd_jack_info virtio_snd_jack_info; + +/* jack remapping control request */ +typedef struct virtio_snd_jack_remap virtio_snd_jack_remap; + +/* + * PCM CONTROL MESSAGES + */ +typedef struct virtio_snd_pcm_hdr virtio_snd_pcm_hdr; + +/* PCM stream info structure */ +typedef struct virtio_snd_pcm_info virtio_snd_pcm_info; + +/* set PCM stream params */ +typedef struct virtio_snd_pcm_set_params virtio_snd_pcm_set_params; + +/* I/O request header */ +typedef struct virtio_snd_pcm_xfer virtio_snd_pcm_xfer; + +/* I/O request status */ +typedef struct virtio_snd_pcm_status virtio_snd_pcm_status; + +/* device structs */ + +typedef struct VirtIOSound VirtIOSound; + +typedef struct VirtIOSoundPCMStream VirtIOSoundPCMStream; + +typedef struct virtio_snd_ctrl_command virtio_snd_ctrl_command; + +typedef struct VirtIOSoundPCM VirtIOSoundPCM; + +typedef struct VirtIOSoundPCMBuffer VirtIOSoundPCMBuffer; + +/* + * The VirtIO sound spec reuses layouts and values from the High Definition + * Audio spec (virtio/v1.2: 5.14 Sound Device). This struct handles each I/O + * message's buffer (virtio/v1.2: 5.14.6.8 PCM I/O Messages). + * + * In the case of TX (i.e. playback) buffers, we defer reading the raw PCM data + * from the virtqueue until QEMU's sound subsystem calls the output callback. + * This is tracked by the `bool populated;` field, which is set to true when + * data has been read into our own buffer for consumption. + * + * VirtIOSoundPCMBuffer has a dynamic size since it includes the raw PCM data + * in its allocation. It must be initialized and destroyed as follows: + * + * size_t size = [[derived from owned VQ element descriptor sizes]]; + * buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer) + size); + * buffer->elem = [[owned VQ element]]; + * + * [..] + * + * g_free(buffer->elem); + * g_free(buffer); + */ +struct VirtIOSoundPCMBuffer { + QSIMPLEQ_ENTRY(VirtIOSoundPCMBuffer) entry; + VirtQueueElement *elem; + VirtQueue *vq; + size_t size; + /* + * In TX / Playback, `offset` represents the first unused position inside + * `data`. If `offset == size` then there are no unused data left. + */ + uint64_t offset; + /* Used for the TX queue for lazy I/O copy from `elem` */ + bool populated; + /* + * VirtIOSoundPCMBuffer is an unsized type because it ends with an array of + * bytes. The size of `data` is determined from the I/O message's read-only + * or write-only size when allocating VirtIOSoundPCMBuffer.
+ */ + uint8_t data[]; +}; + +struct VirtIOSoundPCM { + VirtIOSound *snd; + /* + * PCM parameters are a separate field instead of a VirtIOSoundPCMStream + * field, because the operation of PCM control requests is first + * VIRTIO_SND_R_PCM_SET_PARAMS and then VIRTIO_SND_R_PCM_PREPARE; this + * means that sometimes we get parameters without having an allocated + * stream yet. + */ + virtio_snd_pcm_set_params *pcm_params; + VirtIOSoundPCMStream **streams; +}; + +struct VirtIOSoundPCMStream { + VirtIOSoundPCM *pcm; + virtio_snd_pcm_info info; + virtio_snd_pcm_set_params params; + uint32_t id; + /* channel position values (VIRTIO_SND_CHMAP_XXX) */ + uint8_t positions[VIRTIO_SND_CHMAP_MAX_SIZE]; + VirtIOSound *s; + bool flushing; + audsettings as; + union { + SWVoiceIn *in; + SWVoiceOut *out; + } voice; + QemuMutex queue_mutex; + bool active; + QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) queue; + QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid; +}; + +/* + * PCM stream state machine. + * ------------------------- + * + * 5.14.6.6.1 PCM Command Lifecycle + * ================================ + * + * A PCM stream has the following command lifecycle: + * - `SET PARAMETERS` + * The driver negotiates the stream parameters (format, transport, etc) with + * the device. + * Possible valid transitions: `SET PARAMETERS`, `PREPARE`. + * - `PREPARE` + * The device prepares the stream (allocates resources, etc). + * Possible valid transitions: `SET PARAMETERS`, `PREPARE`, `START`, + * `RELEASE`. Output only: the driver transfers data for pre-buffering. + * - `START` + * The device starts the stream (unmute, putting into running state, etc). + * Possible valid transitions: `STOP`. + * The driver transfers data to/from the stream. + * - `STOP` + * The device stops the stream (mute, putting into non-running state, etc). + * Possible valid transitions: `START`, `RELEASE`. + * - `RELEASE` + * The device releases the stream (frees resources, etc). + * Possible valid transitions: `SET PARAMETERS`, `PREPARE`. + * + * +---------------+ +---------+ +---------+ +-------+ +-------+ + * | SetParameters | | Prepare | | Release | | Start | | Stop | + * +---------------+ +---------+ +---------+ +-------+ +-------+ + * |- | | | | + * || | | | | + * |< | | | | + * |------------->| | | | + * |<-------------| | | | + * | |- | | | + * | || | | | + * | |< | | | + * | |--------------------->| | + * | |---------->| | | + * | | | |-------->| + * | | | |<--------| + * | | |<-------------------| + * |<-------------------------| | | + * | |<----------| | | + * + * CTRL in the VirtIOSound device + * ============================== + * + * The control messages that affect the state of a stream arrive in the + * `virtio_snd_handle_ctrl()` queue callback and are of type `struct + * virtio_snd_ctrl_command`. They are stored in a queue field in the device + * type, `VirtIOSound`. This allows deferring the CTRL request completion if + * it's not immediately possible due to locking/state reasons. + * + * The CTRL message is finally handled in `process_cmd()`.
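The transition rules listed above are small enough to encode as a lookup table; the following sketch (the pcm_state enum and pcm_transition_is_valid() helper are illustrative, not part of this header) shows one way a device model could validate a requested transition:

    #include <stdbool.h>

    /* Illustrative states mirroring 5.14.6.6.1 PCM Command Lifecycle */
    enum pcm_state {
        PCM_SET_PARAMS,
        PCM_PREPARE,
        PCM_START,
        PCM_STOP,
        PCM_RELEASE,
        PCM_STATE_MAX,
    };

    /* valid_transition[from][to], straight from the lifecycle above */
    static const bool valid_transition[PCM_STATE_MAX][PCM_STATE_MAX] = {
        [PCM_SET_PARAMS] = { [PCM_SET_PARAMS] = true, [PCM_PREPARE] = true },
        [PCM_PREPARE]    = { [PCM_SET_PARAMS] = true, [PCM_PREPARE] = true,
                             [PCM_START] = true,      [PCM_RELEASE] = true },
        [PCM_START]      = { [PCM_STOP] = true },
        [PCM_STOP]       = { [PCM_START] = true,      [PCM_RELEASE] = true },
        [PCM_RELEASE]    = { [PCM_SET_PARAMS] = true, [PCM_PREPARE] = true },
    };

    static inline bool pcm_transition_is_valid(enum pcm_state from,
                                               enum pcm_state to)
    {
        return valid_transition[from][to];
    }

An invalid request (for example START while still in SET PARAMETERS) would then be completed with VIRTIO_SND_S_BAD_MSG rather than acted upon.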
+ */ +struct VirtIOSound { + VirtIODevice parent_obj; + + VirtQueue *queues[VIRTIO_SND_VQ_MAX]; + uint64_t features; + VirtIOSoundPCM *pcm; + QEMUSoundCard card; + VMChangeStateEntry *vmstate; + virtio_snd_config snd_conf; + QemuMutex cmdq_mutex; + QTAILQ_HEAD(, virtio_snd_ctrl_command) cmdq; + bool processing_cmdq; +}; + +struct virtio_snd_ctrl_command { + VirtQueueElement *elem; + VirtQueue *vq; + virtio_snd_hdr ctrl; + virtio_snd_hdr resp; + QTAILQ_ENTRY(virtio_snd_ctrl_command) next; +}; +#endif diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 18593db5b2..c0c8320413 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -102,7 +102,7 @@ struct SysemuCPUOps; /** * CPUClass: * @class_by_name: Callback to map -cpu command line model name to an - * instantiatable CPU type. + * instantiatable CPU type. * @parse_features: Callback to parse command line arguments. * @reset_dump_flags: #CPUDumpFlags to use for reset logging. * @has_work: Callback for checking if there is work to do. @@ -408,7 +408,7 @@ struct qemu_work_item; * See TranslationBlock::TCG CF_CLUSTER_MASK. * @tcg_cflags: Pre-computed cflags for this cpu. * @nr_cores: Number of cores within this CPU package. - * @nr_threads: Number of threads within this CPU. + * @nr_threads: Number of threads within this CPU core. * @running: #true if CPU is currently running (lockless). * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end; * valid under cpu_list_lock. @@ -586,13 +586,13 @@ static inline CPUArchState *cpu_env(CPUState *cpu) } typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ; -extern CPUTailQ cpus; +extern CPUTailQ cpus_queue; -#define first_cpu QTAILQ_FIRST_RCU(&cpus) +#define first_cpu QTAILQ_FIRST_RCU(&cpus_queue) #define CPU_NEXT(cpu) QTAILQ_NEXT_RCU(cpu, node) -#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node) +#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus_queue, node) #define CPU_FOREACH_SAFE(cpu, next_cpu) \ - QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu) + QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus_queue, node, next_cpu) extern __thread CPUState *current_cpu; @@ -772,9 +772,10 @@ void cpu_reset(CPUState *cpu); * @typename: The CPU base type. * @cpu_model: The model string without any parameters. * - * Looks up a CPU #ObjectClass matching name @cpu_model. + * Looks up a concrete CPU #ObjectClass matching name @cpu_model. * - * Returns: A #CPUClass or %NULL if not matching class is found. + * Returns: A concrete #CPUClass or %NULL if no matching class is found + * or if the matching class is abstract. */ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model); @@ -1151,8 +1152,9 @@ G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...) 
/* $(top_srcdir)/cpu.c */ void cpu_class_init_props(DeviceClass *dc); void cpu_exec_initfn(CPUState *cpu); -void cpu_exec_realizefn(CPUState *cpu, Error **errp); +bool cpu_exec_realizefn(CPUState *cpu, Error **errp); void cpu_exec_unrealizefn(CPUState *cpu); +void cpu_exec_reset_hold(CPUState *cpu); /** * target_words_bigendian: diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h index 4944725849..75e47b6864 100644 --- a/include/hw/cxl/cxl.h +++ b/include/hw/cxl/cxl.h @@ -61,4 +61,10 @@ OBJECT_DECLARE_SIMPLE_TYPE(CXLHost, PXB_CXL_HOST) typedef struct CXLUpstreamPort CXLUpstreamPort; DECLARE_INSTANCE_CHECKER(CXLUpstreamPort, CXL_USP, TYPE_CXL_USP) CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp); + +#define TYPE_CXL_DSP "cxl-downstream" + +typedef struct CXLDownstreamPort CXLDownstreamPort; +DECLARE_INSTANCE_CHECKER(CXLDownstreamPort, CXL_DSP, TYPE_CXL_DSP) + #endif diff --git a/include/hw/cxl/cxl_component.h b/include/hw/cxl/cxl_component.h index 3c795a6278..5227a8e833 100644 --- a/include/hw/cxl/cxl_component.h +++ b/include/hw/cxl/cxl_component.h @@ -26,7 +26,8 @@ enum reg_type { CXL2_LOGICAL_DEVICE, CXL2_ROOT_PORT, CXL2_UPSTREAM_PORT, - CXL2_DOWNSTREAM_PORT + CXL2_DOWNSTREAM_PORT, + CXL3_SWITCH_MAILBOX_CCI, }; /* @@ -175,7 +176,8 @@ HDM_DECODER_INIT(3); (CXL_IDE_REGISTERS_OFFSET + CXL_IDE_REGISTERS_SIZE) #define CXL_SNOOP_REGISTERS_SIZE 0x8 -QEMU_BUILD_BUG_MSG((CXL_SNOOP_REGISTERS_OFFSET + CXL_SNOOP_REGISTERS_SIZE) >= 0x1000, +QEMU_BUILD_BUG_MSG((CXL_SNOOP_REGISTERS_OFFSET + + CXL_SNOOP_REGISTERS_SIZE) >= 0x1000, "No space for registers"); typedef struct component_registers { diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index 51cd0d9ce3..61b7f897f7 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -111,6 +111,20 @@ typedef enum { CXL_MBOX_MAX = 0x17 } CXLRetCode; +typedef struct CXLCCI CXLCCI; +typedef struct cxl_device_state CXLDeviceState; +struct cxl_cmd; +typedef CXLRetCode (*opcode_handler)(const struct cxl_cmd *cmd, + uint8_t *payload_in, size_t len_in, + uint8_t *payload_out, size_t *len_out, + CXLCCI *cci); +struct cxl_cmd { + const char *name; + opcode_handler handler; + ssize_t in; + uint16_t effect; /* Reported in CEL */ +}; + typedef struct CXLEvent { CXLEventRecordRaw data; QSIMPLEQ_ENTRY(CXLEvent) node; @@ -127,6 +141,31 @@ typedef struct CXLEventLog { QSIMPLEQ_HEAD(, CXLEvent) events; } CXLEventLog; +typedef struct CXLCCI { + const struct cxl_cmd (*cxl_cmd_set)[256]; + struct cel_log { + uint16_t opcode; + uint16_t effect; + } cel_log[1 << 16]; + size_t cel_size; + + /* background command handling (times in ms) */ + struct { + uint16_t opcode; + uint16_t complete_pct; + uint16_t ret_code; /* Current value of retcode */ + uint64_t starttime; + /* set by each bg cmd, cleared by the bg_timer when complete */ + uint64_t runtime; + QEMUTimer *timer; + } bg; + size_t payload_max; + /* Pointer to device hosting the CCI */ + DeviceState *d; + /* Pointer to the device hosting the protocol conversion */ + DeviceState *intf; +} CXLCCI; + typedef struct cxl_device_state { MemoryRegion device_registers; @@ -154,17 +193,13 @@ typedef struct cxl_device_state { struct { MemoryRegion mailbox; uint16_t payload_size; + uint8_t mbox_msi_n; union { uint8_t mbox_reg_state[CXL_MAILBOX_REGISTERS_LENGTH]; uint16_t mbox_reg_state16[CXL_MAILBOX_REGISTERS_LENGTH / 2]; uint32_t mbox_reg_state32[CXL_MAILBOX_REGISTERS_LENGTH / 4]; uint64_t mbox_reg_state64[CXL_MAILBOX_REGISTERS_LENGTH / 8]; }; - struct cel_log { - uint16_t opcode; - 
uint16_t effect; - } cel_log[1 << 16]; - size_t cel_size; }; struct { @@ -178,21 +213,26 @@ typedef struct cxl_device_state { uint64_t pmem_size; uint64_t vmem_size; + const struct cxl_cmd (*cxl_cmd_set)[256]; CXLEventLog event_logs[CXL_EVENT_TYPE_MAX]; } CXLDeviceState; /* Initialize the register block for a device */ -void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev); +void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev, + CXLCCI *cci); +typedef struct CXLType3Dev CXLType3Dev; +typedef struct CSWMBCCIDev CSWMBCCIDev; /* Set up default values for the register block */ -void cxl_device_register_init_common(CXLDeviceState *dev); +void cxl_device_register_init_t3(CXLType3Dev *ct3d); +void cxl_device_register_init_swcci(CSWMBCCIDev *sw); /* * CXL 2.0 - 8.2.8.1 including errata F4 * Documented as a 128 bit register, but 64 bit accesses and the second * 64 bits are currently reserved. */ -REG64(CXL_DEV_CAP_ARRAY, 0) /* Documented as 128 bit register but 64 byte accesses */ +REG64(CXL_DEV_CAP_ARRAY, 0) FIELD(CXL_DEV_CAP_ARRAY, CAP_ID, 0, 16) FIELD(CXL_DEV_CAP_ARRAY, CAP_VERSION, 16, 8) FIELD(CXL_DEV_CAP_ARRAY, CAP_COUNT, 32, 16) @@ -231,8 +271,20 @@ CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE, CXL_DEVICE_CAP_HDR1_OFFSET + CXL_DEVICE_CAP_REG_SIZE * 2) -void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate); -void cxl_process_mailbox(CXLDeviceState *cxl_dstate); +void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max); +void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf, + DeviceState *d, size_t payload_max); +void cxl_init_cci(CXLCCI *cci, size_t payload_max); +int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, + size_t len_in, uint8_t *pl_in, + size_t *len_out, uint8_t *pl_out, + bool *bg_started); +void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d, + DeviceState *intf, + size_t payload_max); + +void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, + DeviceState *intf, size_t payload_max); #define cxl_device_cap_init(dstate, reg, cap_id, ver) \ do { \ @@ -297,6 +349,23 @@ REG64(CXL_MEM_DEV_STS, 0) FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1) FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3) +static inline void __toggle_media(CXLDeviceState *cxl_dstate, int val) +{ + uint64_t dev_status_reg; + + dev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, val); + cxl_dstate->mbox_reg_state64[R_CXL_MEM_DEV_STS] = dev_status_reg; +} +#define cxl_dev_disable_media(cxlds) \ + do { __toggle_media((cxlds), 0x3); } while (0) +#define cxl_dev_enable_media(cxlds) \ + do { __toggle_media((cxlds), 0x1); } while (0) + +static inline bool sanitize_running(CXLCCI *cci) +{ + return !!cci->bg.runtime && cci->bg.opcode == 0x4400; +} + typedef struct CXLError { QTAILQ_ENTRY(CXLError) node; int type; /* Error code as per FE definition */ @@ -333,6 +402,10 @@ struct CXLType3Dev { AddressSpace hostpmem_as; CXLComponentState cxl_cstate; CXLDeviceState cxl_dstate; + CXLCCI cci; /* Primary PCI mailbox CCI */ + /* Always initialized since there is no way to know if a VDM might show up */ + CXLCCI vdm_fm_owned_ld_mctp_cci; + CXLCCI ld0_cci; /* DOE */ DOECap doe_cdat; @@ -361,9 +434,21 @@ struct CXLType3Class { uint64_t offset); void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size, uint64_t offset); - bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data); + bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset, + uint8_t *data); }; +struct CSWMBCCIDev { + PCIDevice parent_obj; + 
PCIDevice *target; + CXLComponentState cxl_cstate; + CXLDeviceState cxl_dstate; + CXLCCI *cci; +}; + +#define TYPE_CXL_SWITCH_MAILBOX_CCI "cxl-switch-mailbox-cci" +OBJECT_DECLARE_TYPE(CSWMBCCIDev, CSWMBCCIClass, CXL_SWITCH_MAILBOX_CCI) + MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, unsigned size, MemTxAttrs attrs); MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, @@ -376,7 +461,7 @@ bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type, CXLEventRecordRaw *event); CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl, uint8_t log_type, int max_recs, - uint16_t *len); + size_t *len); CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds, CXLClearEventPayload *pl); diff --git a/include/hw/cxl/cxl_events.h b/include/hw/cxl/cxl_events.h index 089ba2091f..d778487b7e 100644 --- a/include/hw/cxl/cxl_events.h +++ b/include/hw/cxl/cxl_events.h @@ -92,7 +92,8 @@ typedef enum CXLEventIntMode { CXL_INT_RES = 0x03, } CXLEventIntMode; #define CXL_EVENT_INT_MODE_MASK 0x3 -#define CXL_EVENT_INT_SETTING(vector) ((((uint8_t)vector & 0xf) << 4) | CXL_INT_MSI_MSIX) +#define CXL_EVENT_INT_SETTING(vector) \ + ((((uint8_t)vector & 0xf) << 4) | CXL_INT_MSI_MSIX) typedef struct CXLEventInterruptPolicy { uint8_t info_settings; uint8_t warn_settings; diff --git a/include/hw/cxl/cxl_pci.h b/include/hw/cxl/cxl_pci.h index 407be95b9e..ddf01a543b 100644 --- a/include/hw/cxl/cxl_pci.h +++ b/include/hw/cxl/cxl_pci.h @@ -86,7 +86,7 @@ typedef struct CXLDVSECDevice { QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDevice) != 0x38); /* CXL 2.0 - 8.1.5 (ID 0003) */ -typedef struct CXLDVSECPortExtensions { +typedef struct CXLDVSECPortExt { DVSECHeader hdr; uint16_t status; uint16_t control; @@ -100,8 +100,8 @@ typedef struct CXLDVSECPortExtensions { uint32_t alt_prefetch_limit_high; uint32_t rcrb_base; uint32_t rcrb_base_high; -} CXLDVSECPortExtensions; -QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortExtensions) != 0x28); +} CXLDVSECPortExt; +QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortExt) != 0x28); #define PORT_CONTROL_OFFSET 0xc #define PORT_CONTROL_UNMASK_SBR 1 diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index dffb0e73d2..0a5c258fe6 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -385,10 +385,11 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd, } if (pflags) { - *pflags = (elf_word)ehdr.e_flags; + *pflags = ehdr.e_flags; + } + if (pentry) { + *pentry = ehdr.e_entry; } - if (pentry) - *pentry = (uint64_t)(elf_sword)ehdr.e_entry; glue(load_symbols, SZ)(&ehdr, fd, must_swab, clear_lsb, sym_cb); @@ -610,10 +611,12 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd, } } - if (lowaddr) - *lowaddr = (uint64_t)(elf_sword)low; - if (highaddr) - *highaddr = (uint64_t)(elf_sword)high; + if (lowaddr) { + *lowaddr = low; + } + if (highaddr) { + *highaddr = high; + } ret = total_size; fail: if (mapped_file) { diff --git a/include/hw/i2c/pmbus_device.h b/include/hw/i2c/pmbus_device.h index 93f5d57c9d..f195c11384 100644 --- a/include/hw/i2c/pmbus_device.h +++ b/include/hw/i2c/pmbus_device.h @@ -243,6 +243,7 @@ OBJECT_DECLARE_TYPE(PMBusDevice, PMBusDeviceClass, #define PB_HAS_VIN_RATING BIT_ULL(13) #define PB_HAS_VOUT_RATING BIT_ULL(14) #define PB_HAS_VOUT_MODE BIT_ULL(15) +#define PB_HAS_VCAP BIT_ULL(16) #define PB_HAS_IOUT BIT_ULL(21) #define PB_HAS_IIN BIT_ULL(22) #define PB_HAS_IOUT_RATING BIT_ULL(23) @@ -258,6 +259,7 @@ OBJECT_DECLARE_TYPE(PMBusDevice, PMBusDeviceClass, #define PB_HAS_TEMP2 BIT_ULL(41) #define 
PB_HAS_TEMP3 BIT_ULL(42) #define PB_HAS_TEMP_RATING BIT_ULL(43) +#define PB_HAS_FAN BIT_ULL(44) #define PB_HAS_MFR_INFO BIT_ULL(50) #define PB_HAS_STATUS_MFR_SPECIFIC BIT_ULL(51) @@ -445,6 +447,14 @@ typedef struct PMBusCoefficients { } PMBusCoefficients; /** + * VOUT_Mode bit fields + */ +typedef struct PMBusVoutMode { + uint8_t mode:3; + int8_t exp:5; +} PMBusVoutMode; + +/** * Convert sensor values to direct mode format * * Y = (m * x - b) * 10^R @@ -502,6 +512,13 @@ void pmbus_send64(PMBusDevice *state, uint64_t data); void pmbus_send_string(PMBusDevice *state, const char *data); /** + * @brief Receive data sent with Block Write. + * @param dest - memory with enough capacity to receive the write + * @param len - the capacity of dest + */ +uint8_t pmbus_receive_block(PMBusDevice *pmdev, uint8_t *dest, size_t len); + +/** * @brief Receive data over PMBus * These methods help track how much data is being received over PMBus * Log to GUEST_ERROR if too much or too little is sent. diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 29a9724524..a10ceeabbf 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -33,6 +33,7 @@ typedef struct PCMachineState { /* Pointers to devices and objects: */ PCIBus *bus; + BusState *xenbus; I2CBus *smbus; PFlashCFI01 *flash[2]; ISADevice *pcspk; @@ -184,7 +185,8 @@ void pc_basic_device_init(struct PCMachineState *pcms, void pc_cmos_init(PCMachineState *pcms, BusState *ide0, BusState *ide1, ISADevice *s); -void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus); +void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus, + BusState *xen_bus); void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs); diff --git a/include/hw/i386/topology.h b/include/hw/i386/topology.h index 380cb27ded..d4eeb7ab82 100644 --- a/include/hw/i386/topology.h +++ b/include/hw/i386/topology.h @@ -24,7 +24,8 @@ #ifndef HW_I386_TOPOLOGY_H #define HW_I386_TOPOLOGY_H -/* This file implements the APIC-ID-based CPU topology enumeration logic, +/* + * This file implements the APIC-ID-based CPU topology enumeration logic, * documented at the following document: * Intel® 64 Architecture Processor Topology Enumeration * http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/ @@ -41,7 +42,8 @@ #include "qemu/bitops.h" -/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support +/* + * APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support */ typedef uint32_t apic_id_t; @@ -58,8 +60,7 @@ typedef struct X86CPUTopoInfo { unsigned threads_per_core; } X86CPUTopoInfo; -/* Return the bit width needed for 'count' IDs - */ +/* Return the bit width needed for 'count' IDs */ static unsigned apicid_bitwidth_for_count(unsigned count) { g_assert(count >= 1); @@ -67,15 +68,13 @@ static unsigned apicid_bitwidth_for_count(unsigned count) return count ? 
32 - clz32(count) : 0; } -/* Bit width of the SMT_ID (thread ID) field on the APIC ID - */ +/* Bit width of the SMT_ID (thread ID) field on the APIC ID */ static inline unsigned apicid_smt_width(X86CPUTopoInfo *topo_info) { return apicid_bitwidth_for_count(topo_info->threads_per_core); } -/* Bit width of the Core_ID field - */ +/* Bit width of the Core_ID field */ static inline unsigned apicid_core_width(X86CPUTopoInfo *topo_info) { return apicid_bitwidth_for_count(topo_info->cores_per_die); @@ -87,8 +86,7 @@ static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info) return apicid_bitwidth_for_count(topo_info->dies_per_pkg); } -/* Bit offset of the Core_ID field - */ +/* Bit offset of the Core_ID field */ static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info) { return apicid_smt_width(topo_info); @@ -100,14 +98,14 @@ static inline unsigned apicid_die_offset(X86CPUTopoInfo *topo_info) return apicid_core_offset(topo_info) + apicid_core_width(topo_info); } -/* Bit offset of the Pkg_ID (socket ID) field - */ +/* Bit offset of the Pkg_ID (socket ID) field */ static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info) { return apicid_die_offset(topo_info) + apicid_die_width(topo_info); } -/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID +/* + * Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID * * The caller must make sure core_id < nr_cores and smt_id < nr_threads. */ @@ -120,7 +118,8 @@ static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info, topo_ids->smt_id; } -/* Calculate thread/core/package IDs for a specific topology, +/* + * Calculate thread/core/package IDs for a specific topology, * based on (contiguous) CPU index */ static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info, @@ -137,7 +136,8 @@ static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info, topo_ids->smt_id = cpu_index % nr_threads; } -/* Calculate thread/core/package IDs for a specific topology, +/* + * Calculate thread/core/package IDs for a specific topology, * based on APIC ID */ static inline void x86_topo_ids_from_apicid(apic_id_t apicid, @@ -155,7 +155,8 @@ static inline void x86_topo_ids_from_apicid(apic_id_t apicid, topo_ids->pkg_id = apicid >> apicid_pkg_offset(topo_info); } -/* Make APIC ID for the CPU 'cpu_index' +/* + * Make APIC ID for the CPU 'cpu_index' * * 'cpu_index' is a sequential, contiguous ID for the CPU. 
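Since the width/offset helpers above compose the APIC ID purely from bit arithmetic, a worked example helps. The following standalone sketch assumes a made-up topology of 2 dies per package, 4 cores per die and 2 threads per core; bitwidth_for_count() mimics apicid_bitwidth_for_count(), with the GCC/Clang __builtin_clz standing in for QEMU's clz32():

    #include <stdint.h>
    #include <stdio.h>

    /* Bit width needed for `count` IDs, as in apicid_bitwidth_for_count() */
    static unsigned bitwidth_for_count(unsigned count)
    {
        count -= 1;
        return count ? 32 - __builtin_clz(count) : 0;
    }

    int main(void)
    {
        unsigned smt_width  = bitwidth_for_count(2); /* threads/core: 1 bit  */
        unsigned core_width = bitwidth_for_count(4); /* cores/die:    2 bits */
        unsigned die_width  = bitwidth_for_count(2); /* dies/pkg:     1 bit  */

        unsigned core_offset = smt_width;                /* 1 */
        unsigned die_offset  = core_offset + core_width; /* 3 */
        unsigned pkg_offset  = die_offset + die_width;   /* 4 */

        /* pkg 1, die 0, core 3, thread 1 -> binary 10111 */
        uint32_t apicid = (1u << pkg_offset) | (0u << die_offset) |
                          (3u << core_offset) | 1u;

        printf("APIC ID = %u\n", apicid); /* prints 23 */
        return 0;
    }

This is the same composition x86_apicid_from_topo_ids() performs with the X86CPUTopoIDs fields.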
*/ diff --git a/include/hw/loader.h b/include/hw/loader.h index c4c14170ea..8685e27334 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -272,7 +272,7 @@ void pstrcpy_targphys(const char *name, ssize_t rom_add_file(const char *file, const char *fw_dir, hwaddr addr, int32_t bootindex, - bool option_rom, MemoryRegion *mr, AddressSpace *as); + bool has_option_rom, MemoryRegion *mr, AddressSpace *as); MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, size_t max_len, hwaddr addr, const char *fw_file_name, diff --git a/include/hw/pci-bridge/cxl_upstream_port.h b/include/hw/pci-bridge/cxl_upstream_port.h new file mode 100644 index 0000000000..12635139f6 --- /dev/null +++ b/include/hw/pci-bridge/cxl_upstream_port.h @@ -0,0 +1,19 @@ + +#ifndef CXL_USP_H +#define CXL_USP_H +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_port.h" +#include "hw/cxl/cxl.h" + +typedef struct CXLUpstreamPort { + /*< private >*/ + PCIEPort parent_obj; + + /*< public >*/ + CXLComponentState cxl_cstate; + CXLCCI swcci; + DOECap doe_cdat; + uint64_t sn; +} CXLUpstreamPort; + +#endif /* CXL_USP_H */ diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h index 17a8dfc107..d5d119ea7f 100644 --- a/include/hw/ppc/ppc.h +++ b/include/hw/ppc/ppc.h @@ -1,7 +1,7 @@ #ifndef HW_PPC_H #define HW_PPC_H -#include "target/ppc/cpu-qom.h" +#include "target/ppc/cpu.h" void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level); PowerPCCPU *ppc_get_vcpu_by_pir(int pir); diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h index 75e5381613..ba72ee3dd2 100644 --- a/include/hw/s390x/css.h +++ b/include/hw/s390x/css.h @@ -233,7 +233,7 @@ typedef enum { } CssIoAdapterType; void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc); -int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode); +int css_do_sic(S390CPU *cpu, uint8_t isc, uint16_t mode); uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc); void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable, uint8_t flags, Error **errp); diff --git a/include/hw/s390x/sclp.h b/include/hw/s390x/sclp.h index b4ecd04e23..b405a387b6 100644 --- a/include/hw/s390x/sclp.h +++ b/include/hw/s390x/sclp.h @@ -225,8 +225,7 @@ static inline int sccb_data_len(SCCB *sccb) void s390_sclp_init(void); void sclp_service_interrupt(uint32_t sccb); void raise_irq_cpu_hotplug(void); -int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code); -int sclp_service_call_protected(CPUS390XState *env, uint64_t sccb, - uint32_t code); +int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code); +int sclp_service_call_protected(S390CPU *cpu, uint64_t sccb, uint32_t code); #endif diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index 96ccc18cd3..a86d103f82 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -26,6 +26,18 @@ typedef enum VhostSetConfigType { VHOST_SET_CONFIG_TYPE_MIGRATION = 1, } VhostSetConfigType; +typedef enum VhostDeviceStateDirection { + /* Transfer state from back-end (device) to front-end */ + VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0, + /* Transfer state from front-end to back-end (device) */ + VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1, +} VhostDeviceStateDirection; + +typedef enum VhostDeviceStatePhase { + /* The device (and all its vrings) is stopped */ + VHOST_TRANSFER_STATE_PHASE_STOPPED = 0, +} VhostDeviceStatePhase; + struct vhost_inflight; struct vhost_dev; struct vhost_log; @@ -129,6 +141,15 @@ typedef int 
(*vhost_set_config_call_op)(struct vhost_dev *dev, typedef void (*vhost_reset_status_op)(struct vhost_dev *dev); +typedef bool (*vhost_supports_device_state_op)(struct vhost_dev *dev); +typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev, + VhostDeviceStateDirection direction, + VhostDeviceStatePhase phase, + int fd, + int *reply_fd, + Error **errp); +typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, Error **errp); + typedef struct VhostOps { VhostBackendType backend_type; vhost_backend_init vhost_backend_init; @@ -176,6 +197,9 @@ typedef struct VhostOps { vhost_force_iommu_op vhost_force_iommu; vhost_set_config_call_op vhost_set_config_call; vhost_reset_status_op vhost_reset_status; + vhost_supports_device_state_op vhost_supports_device_state; + vhost_set_device_state_fd_op vhost_set_device_state_fd; + vhost_check_device_state_op vhost_check_device_state; } VhostOps; int vhost_backend_update_device_iotlb(struct vhost_dev *dev, diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h index 20b69d8e85..d7c09ffd34 100644 --- a/include/hw/virtio/vhost-user.h +++ b/include/hw/virtio/vhost-user.h @@ -31,6 +31,7 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_STATUS = 16, /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */ VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18, + VHOST_USER_PROTOCOL_F_DEVICE_STATE = 19, VHOST_USER_PROTOCOL_F_MAX }; diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 5e8183f64a..05d7204a08 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -351,4 +351,117 @@ static inline int vhost_reset_device(struct vhost_dev *hdev) } #endif /* CONFIG_VHOST */ +/** + * vhost_supports_device_state(): Checks whether the back-end supports + * transferring internal device state for the purpose of migration. + * Support for this feature is required for vhost_set_device_state_fd() + * and vhost_check_device_state(). + * + * @dev: The vhost device + * + * Returns true if the device supports these commands, and false if it + * does not. + */ +bool vhost_supports_device_state(struct vhost_dev *dev); + +/** + * vhost_set_device_state_fd(): Begin transfer of internal state from/to + * the back-end for the purpose of migration. Data is to be transferred + * over a pipe according to @direction and @phase. The sending end must + * only write to the pipe, and the receiving end must only read from it. + * Once the sending end is done, it closes its FD. The receiving end + * must take this as the end-of-transfer signal and close its FD, too. + * + * @fd is the back-end's end of the pipe: The write FD for SAVE, and the + * read FD for LOAD. This function transfers ownership of @fd to the + * back-end, i.e. closes it in the front-end. + * + * The back-end may optionally reply with an FD of its own, if this + * improves efficiency on its end. In this case, the returned FD is + * stored in *reply_fd. The back-end will discard the FD sent to it, + * and the front-end must use *reply_fd for transferring state to/from + * the back-end. + * + * @dev: The vhost device + * @direction: The direction in which the state is to be transferred. + * For outgoing migrations, this is SAVE, and data is read + * from the back-end and stored by the front-end in the + * migration stream. + * For incoming migrations, this is LOAD, and data is read + * by the front-end from the migration stream and sent to + * the back-end to restore the saved state. + * @phase: Which migration phase we are in. 
Currently, there is only + * STOPPED (device and all vrings are stopped), in the future, + * more phases such as PRE_COPY or POST_COPY may be added. + * @fd: Back-end's end of the pipe through which to transfer state; note + * that ownership is transferred to the back-end, so this function + * closes @fd in the front-end. + * @reply_fd: If the back-end wishes to use a different pipe for state + * transfer, this will contain an FD for the front-end to + * use. Otherwise, -1 is stored here. + * @errp: Potential error description + * + * Returns 0 on success, and -errno on failure. + */ +int vhost_set_device_state_fd(struct vhost_dev *dev, + VhostDeviceStateDirection direction, + VhostDeviceStatePhase phase, + int fd, + int *reply_fd, + Error **errp); + +/** + * vhost_check_device_state(): After transferring state from/to the + * back-end via vhost_set_device_state_fd(), i.e. once the sending end + * has closed the pipe, ask the back-end to report any errors that have + * occurred on its side. This makes it possible to detect errors like: + * - During outgoing migration, when the source side had already started + * to produce its state, something went wrong and it failed to finish + * - During incoming migration, when the received state is somehow + * invalid and cannot be processed by the back-end + * + * @dev: The vhost device + * @errp: Potential error description + * + * Returns 0 when the back-end reports successful state transfer and + * processing, and -errno when an error occurred somewhere. + */ +int vhost_check_device_state(struct vhost_dev *dev, Error **errp); + +/** + * vhost_save_backend_state(): High-level function to receive a vhost + * back-end's state, and save it in @f. Uses + * `vhost_set_device_state_fd()` to get the data from the back-end, and + * stores it in consecutive chunks that are each prefixed by their + * respective length (be32). The end is marked by a 0-length chunk. + * + * Must only be called while the device and all its vrings are stopped + * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`). + * + * @dev: The vhost device from which to save the state + * @f: Migration stream in which to save the state + * @errp: Potential error message + * + * Returns 0 on success, and -errno otherwise. + */ +int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp); + +/** + * vhost_load_backend_state(): High-level function to load a vhost + * back-end's state from @f, and send it over to the back-end. Reads + * the data from @f in the format used by `vhost_save_backend_state()`, + * and uses `vhost_set_device_state_fd()` to transfer it to the back-end. + * + * Must only be called while the device and all its vrings are stopped + * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`). + * + * @dev: The vhost device to which to send the state + * @f: Migration stream from which to load the state + * @errp: Potential error message + * + * Returns 0 on success, and -errno otherwise. + */ +int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp); + #endif diff --git a/include/hw/xen/interface/arch-arm.h b/include/hw/xen/interface/arch-arm.h index 94b31511dd..1528ced509 100644 --- a/include/hw/xen/interface/arch-arm.h +++ b/include/hw/xen/interface/arch-arm.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * arch-arm.h * * Guest OS interface to ARM Xen. 
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright 2011 (C) Citrix Systems */ @@ -361,6 +344,7 @@ typedef uint64_t xen_callback_t; #define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */ #define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */ #define PSR_JAZELLE (1<<24) /* Jazelle Mode */ +#define PSR_Z (1<<30) /* Zero condition flag */ /* 32 bit modes */ #define PSR_MODE_USR 0x10 @@ -383,7 +367,15 @@ typedef uint64_t xen_callback_t; #define PSR_MODE_EL1t 0x04 #define PSR_MODE_EL0t 0x00 -#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC) +/* + * We set PSR_Z to be able to boot Linux kernel versions with an invalid + * encoding of the first 8 NOP instructions. See commit a92882a4d270 in + * Linux. + * + * Note that PSR_Z is also set by U-Boot and QEMU -kernel when loading + * zImage kernels on aarch32. + */ +#define PSR_GUEST32_INIT (PSR_Z|PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC) #define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h) #define SCTLR_GUEST_INIT xen_mk_ullong(0x00c50078) @@ -398,6 +390,10 @@ typedef uint64_t xen_callback_t; /* Physical Address Space */ +/* Virtio MMIO mappings */ +#define GUEST_VIRTIO_MMIO_BASE xen_mk_ullong(0x02000000) +#define GUEST_VIRTIO_MMIO_SIZE xen_mk_ullong(0x00100000) + /* * vGIC mappings: Only one set of mapping is used by the guest. * Therefore they can overlap. @@ -484,6 +480,9 @@ typedef uint64_t xen_callback_t; #define GUEST_VPL011_SPI 32 +#define GUEST_VIRTIO_MMIO_SPI_FIRST 33 +#define GUEST_VIRTIO_MMIO_SPI_LAST 43 + /* PSCI functions */ #define PSCI_cpu_suspend 0 #define PSCI_cpu_off 1 diff --git a/include/hw/xen/interface/arch-x86/cpuid.h b/include/hw/xen/interface/arch-x86/cpuid.h index ce46305bee..7ecd16ae05 100644 --- a/include/hw/xen/interface/arch-x86/cpuid.h +++ b/include/hw/xen/interface/arch-x86/cpuid.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * arch-x86/cpuid.h * * CPUID interface to Xen. 
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2007 Citrix Systems, Inc. * * Authors: @@ -102,6 +85,18 @@ #define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2) #define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */ #define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */ +/* + * With interrupt format set to 0 (non-remappable) bits 55:49 from the + * IO-APIC RTE and bits 11:5 from the MSI address can be used to store + * high bits for the Destination ID. This expands the Destination ID + * field from 8 to 15 bits, allowing APIC IDs up to 32768 to be targeted. + */ +#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5) +/* + * Per-vCPU event channel upcalls work correctly with physical IRQs + * bound to event channels. + */ +#define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6) /* * Leaf 6 (0x40000x05) diff --git a/include/hw/xen/interface/arch-x86/xen-x86_32.h b/include/hw/xen/interface/arch-x86/xen-x86_32.h index 19d7388633..139438e835 100644 --- a/include/hw/xen/interface/arch-x86/xen-x86_32.h +++ b/include/hw/xen/interface/arch-x86/xen-x86_32.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
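The extended-destination-ID layout that XEN_HVM_CPUID_EXT_DEST_ID advertises above can be illustrated for the MSI address case: the classic 8-bit destination ID lives in address bits 19:12, and the extension reuses address bits 11:5 for destination ID bits 14:8, giving the 15 bits needed to reach up to 32768 APIC IDs. A sketch (msi_addr_for_dest() is an illustrative helper, not a Xen or QEMU API; the bit positions are taken from the comment above):

    #include <stdint.h>

    #define MSI_ADDR_BASE 0xfee00000u /* standard x86 MSI address window */

    static uint32_t msi_addr_for_dest(uint16_t dest_id /* 0..32767 */)
    {
        uint32_t addr = MSI_ADDR_BASE;

        addr |= (uint32_t)(dest_id & 0xff) << 12;       /* bits 19:12: ID 7:0  */
        addr |= (uint32_t)((dest_id >> 8) & 0x7f) << 5; /* bits 11:5:  ID 14:8 */
        return addr;
    }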
- * * Copyright (c) 2004-2007, K A Fraser */ diff --git a/include/hw/xen/interface/arch-x86/xen-x86_64.h b/include/hw/xen/interface/arch-x86/xen-x86_64.h index 40aed14366..5d9035ed22 100644 --- a/include/hw/xen/interface/arch-x86/xen-x86_64.h +++ b/include/hw/xen/interface/arch-x86/xen-x86_64.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2004-2006, K A Fraser */ diff --git a/include/hw/xen/interface/arch-x86/xen.h b/include/hw/xen/interface/arch-x86/xen.h index 7acd94c8eb..c0f4551247 100644 --- a/include/hw/xen/interface/arch-x86/xen.h +++ b/include/hw/xen/interface/arch-x86/xen.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2004-2006, K A Fraser */ @@ -320,12 +303,9 @@ struct xen_arch_domainconfig { uint32_t misc_flags; }; -/* Location of online VCPU bitmap. */ -#define XEN_ACPI_CPU_MAP 0xaf00 -#define XEN_ACPI_CPU_MAP_LEN ((HVM_MAX_VCPUS + 7) / 8) +/* Max XEN_X86_* constant. Used for ABI checking. 
*/ +#define XEN_X86_MISC_FLAGS_MAX XEN_X86_MSR_RELAXED -/* GPE0 bit set during CPU hotplug */ -#define XEN_ACPI_GPE0_CPUHP_BIT 2 #endif /* diff --git a/include/hw/xen/interface/event_channel.h b/include/hw/xen/interface/event_channel.h index 73c9f38ce1..0d91a1c4af 100644 --- a/include/hw/xen/interface/event_channel.h +++ b/include/hw/xen/interface/event_channel.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * event_channel.h * * Event channels between domains. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2003-2004, K A Fraser. */ diff --git a/include/hw/xen/interface/features.h b/include/hw/xen/interface/features.h index 9ee2f760ef..d2a9175aae 100644 --- a/include/hw/xen/interface/features.h +++ b/include/hw/xen/interface/features.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * * Copyright (c) 2006, Keir Fraser <keir@xensource.com> */ diff --git a/include/hw/xen/interface/grant_table.h b/include/hw/xen/interface/grant_table.h index 7934d7b718..1dfa17a6d0 100644 --- a/include/hw/xen/interface/grant_table.h +++ b/include/hw/xen/interface/grant_table.h @@ -1,27 +1,10 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2004, K A Fraser */ diff --git a/include/hw/xen/interface/hvm/hvm_op.h b/include/hw/xen/interface/hvm/hvm_op.h index 870ec52060..e22adf0319 100644 --- a/include/hw/xen/interface/hvm/hvm_op.h +++ b/include/hw/xen/interface/hvm/hvm_op.h @@ -1,22 +1,5 @@ +/* SPDX-License-Identifier: MIT */ /* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * * Copyright (c) 2007, Keir Fraser */ diff --git a/include/hw/xen/interface/hvm/params.h b/include/hw/xen/interface/hvm/params.h index c9d6e70d7b..a22b4ed45d 100644 --- a/include/hw/xen/interface/hvm/params.h +++ b/include/hw/xen/interface/hvm/params.h @@ -1,22 +1,5 @@ +/* SPDX-License-Identifier: MIT */ /* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2007, Keir Fraser */ diff --git a/include/hw/xen/interface/io/blkif.h b/include/hw/xen/interface/io/blkif.h index 4cdba79aba..22f1eef0c0 100644 --- a/include/hw/xen/interface/io/blkif.h +++ b/include/hw/xen/interface/io/blkif.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2003-2004, Keir Fraser * Copyright (c) 2012, Spectra Logic Corporation */ @@ -363,6 +346,14 @@ * that the frontend requires that the logical block size is 512 as it * is hardcoded (which is the case in some frontend implementations). * + * trusted + * Values: 0/1 (boolean) + * Default value: 1 + * + * A value of "0" indicates that the frontend should not trust the + * backend, and should deploy whatever measures are available to protect + * itself from a malicious backend on the other end.
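As a hedged illustration of how a frontend might consume the "trusted" property documented above (this is not part of the patch; xenbus_read_unsigned() is the Linux xenbus helper, and struct xenbus_device is the Linux frontend device type):

/*
 * Sketch only: returns false when the frontend must distrust the
 * backend. An absent node means trusted, matching the documented
 * default value of 1.
 */
static bool backend_is_trusted(struct xenbus_device *dev)
{
    return xenbus_read_unsigned(dev->nodename, "trusted", 1) != 0;
}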
+ * *------------------------- Virtual Device Properties ------------------------- * * device-type diff --git a/include/hw/xen/interface/io/console.h b/include/hw/xen/interface/io/console.h index 4811f47220..4509b4b689 100644 --- a/include/hw/xen/interface/io/console.h +++ b/include/hw/xen/interface/io/console.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2005, Keir Fraser */ diff --git a/include/hw/xen/interface/io/fbif.h b/include/hw/xen/interface/io/fbif.h index cc25aab32e..93c73195d8 100644 --- a/include/hw/xen/interface/io/fbif.h +++ b/include/hw/xen/interface/io/fbif.h @@ -1,24 +1,7 @@ +/* SPDX-License-Identifier: MIT */ /* * fbif.h -- Xen virtual frame buffer device * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com> * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com> */ diff --git a/include/hw/xen/interface/io/kbdif.h b/include/hw/xen/interface/io/kbdif.h index a6b01c52c7..4bde6b3821 100644 --- a/include/hw/xen/interface/io/kbdif.h +++ b/include/hw/xen/interface/io/kbdif.h @@ -1,24 +1,7 @@ +/* SPDX-License-Identifier: MIT */ /* * kbdif.h -- Xen virtual keyboard/mouse * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com> * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com> */ diff --git a/include/hw/xen/interface/io/netif.h b/include/hw/xen/interface/io/netif.h index 00dd258712..c13b85061d 100644 --- a/include/hw/xen/interface/io/netif.h +++ b/include/hw/xen/interface/io/netif.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2003-2004, Keir Fraser */ @@ -161,6 +144,12 @@ */ /* + * Setting the "trusted" node to "0" in the frontend path signals that the + * frontend should not trust the backend, and should deploy whatever + * measures are available to protect itself from a malicious backend on the + * other end.
+ */ + +/* * Control ring * ============ * diff --git a/include/hw/xen/interface/io/protocols.h b/include/hw/xen/interface/io/protocols.h index 52b4de0f81..7815e1ff0f 100644 --- a/include/hw/xen/interface/io/protocols.h +++ b/include/hw/xen/interface/io/protocols.h @@ -1,24 +1,7 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * protocols.h * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2008, Keir Fraser */ diff --git a/include/hw/xen/interface/io/ring.h b/include/hw/xen/interface/io/ring.h index c486c457e0..025939278b 100644 --- a/include/hw/xen/interface/io/ring.h +++ b/include/hw/xen/interface/io/ring.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Tim Deegan and Andrew Warfield November 2004. */ @@ -95,9 +78,8 @@ typedef unsigned int RING_IDX; * of the shared memory area (PAGE_SIZE, for instance). 
To initialise the front half: * - * mytag_front_ring_t front_ring; - * SHARED_RING_INIT((mytag_sring_t *)shared_page); - * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + * mytag_front_ring_t ring; + * XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initialising the back half follows similarly (note that only the front * initialises the shared ring): @@ -184,6 +166,11 @@ typedef struct __name##_back_ring __name##_back_ring_t #define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size) +#define XEN_FRONT_RING_INIT(r, s, size) do { \ + SHARED_RING_INIT(s); \ + FRONT_RING_INIT(r, s, size); \ +} while (0) + #define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ (_r)->rsp_prod_pvt = (_i); \ (_r)->req_cons = (_i); \ @@ -208,11 +195,11 @@ typedef struct __name##_back_ring __name##_back_ring_t (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ +#define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ -#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ +#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ @@ -220,13 +207,27 @@ typedef struct __name##_back_ring __name##_back_ring_t }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ +#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif +#ifdef XEN_RING_HAS_UNCONSUMED_IS_BOOL +/* + * These variants should only be used in case no caller is abusing them for + * obtaining the number of unconsumed responses/requests. + */ +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ + (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r)) +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ + (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r)) +#else +#define RING_HAS_UNCONSUMED_RESPONSES(_r) XEN_RING_NR_UNCONSUMED_RESPONSES(_r) +#define RING_HAS_UNCONSUMED_REQUESTS(_r) XEN_RING_NR_UNCONSUMED_REQUESTS(_r) +#endif + /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) diff --git a/include/hw/xen/interface/io/usbif.h b/include/hw/xen/interface/io/usbif.h index c0a552e195..875af0dc7c 100644 --- a/include/hw/xen/interface/io/usbif.h +++ b/include/hw/xen/interface/io/usbif.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * usbif.h * @@ -5,24 +6,6 @@ * * Copyright (C) 2009, FUJITSU LABORATORIES LTD. * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software.
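A minimal usage sketch for the ring macros changed above (illustrative only; it assumes a ring declared with DEFINE_RING_TYPES(mytag, mytag_request_t, mytag_response_t), a mapped shared_page, a PAGE_SIZE constant as in the comment above, and a hypothetical handle_response() consumer):

/* One-step front-half init: zeroes the shared ring, then attaches. */
mytag_front_ring_t front;
XEN_FRONT_RING_INIT(&front, (mytag_sring_t *)shared_page, PAGE_SIZE);

/* Drain responses; the NR_ variant yields a count, not a boolean. */
static void drain_responses(mytag_front_ring_t *ring)
{
    while (XEN_RING_NR_UNCONSUMED_RESPONSES(ring) > 0) {
        mytag_response_t *rsp = RING_GET_RESPONSE(ring, ring->rsp_cons);

        handle_response(rsp);   /* hypothetical consumer */
        ring->rsp_cons++;
    }
}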
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_USBIF_H__ diff --git a/include/hw/xen/interface/io/xenbus.h b/include/hw/xen/interface/io/xenbus.h index 927f9db552..9cd0cd7c67 100644 --- a/include/hw/xen/interface/io/xenbus.h +++ b/include/hw/xen/interface/io/xenbus.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /***************************************************************************** * xenbus.h * * Xenbus protocol details. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (C) 2005 XenSource Ltd. */ diff --git a/include/hw/xen/interface/io/xs_wire.h b/include/hw/xen/interface/io/xs_wire.h index 4dd6632669..04e6849feb 100644 --- a/include/hw/xen/interface/io/xs_wire.h +++ b/include/hw/xen/interface/io/xs_wire.h @@ -1,25 +1,8 @@ +/* SPDX-License-Identifier: MIT */ /* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * * Copyright (C) 2005 Rusty Russell IBM Corporation */ @@ -71,11 +54,12 @@ struct xsd_errors #ifdef EINVAL #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ -static struct xsd_errors xsd_errors[] +static const struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { + /* /!\ New errors should be added at the end of the array. */ XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), @@ -90,7 +74,8 @@ __attribute__((unused)) XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN), - XSD_ERROR(E2BIG) + XSD_ERROR(E2BIG), + XSD_ERROR(EPERM), }; #endif @@ -124,6 +109,7 @@ struct xenstore_domain_interface { XENSTORE_RING_IDX rsp_cons, rsp_prod; uint32_t server_features; /* Bitmap of features supported by the server */ uint32_t connection; + uint32_t error; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ @@ -135,10 +121,18 @@ struct xenstore_domain_interface { /* The ability to reconnect a ring */ #define XENSTORE_SERVER_FEATURE_RECONNECTION 1 +/* The presence of the "error" field in the ring page */ +#define XENSTORE_SERVER_FEATURE_ERROR 2 /* Valid values for the connection field */ #define XENSTORE_CONNECTED 0 /* the steady-state */ -#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */ +#define XENSTORE_RECONNECT 1 /* reconnect in progress */ + +/* Valid values for the error field */ +#define XENSTORE_ERROR_NONE 0 /* No error */ +#define XENSTORE_ERROR_COMM 1 /* Communication problem */ +#define XENSTORE_ERROR_RINGIDX 2 /* Invalid ring index */ +#define XENSTORE_ERROR_PROTO 3 /* Protocol violation (payload too long) */ #endif /* _XS_WIRE_H */ diff --git a/include/hw/xen/interface/memory.h b/include/hw/xen/interface/memory.h index 383a9468c3..29cf5c8239 100644 --- a/include/hw/xen/interface/memory.h +++ b/include/hw/xen/interface/memory.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * memory.h * * Memory reservation and information. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
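To show how the new xenstore ring "error" field composes with its feature bit (a hedged sketch, not from the patch; intf is assumed to point at the guest's mapped xenstore_domain_interface page):

/*
 * Sketch only: servers that do not advertise the ERROR feature never
 * publish a meaningful error value, so treat them as healthy.
 */
static bool xenstore_ring_healthy(const volatile struct xenstore_domain_interface *intf)
{
    if (!(intf->server_features & XENSTORE_SERVER_FEATURE_ERROR)) {
        return true;
    }
    return intf->error == XENSTORE_ERROR_NONE;
}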
- * * Copyright (c) 2005, Keir Fraser <keir@xensource.com> */ @@ -541,12 +524,14 @@ struct xen_mem_sharing_op { uint32_t gref; /* IN: gref to debug */ } u; } debug; - struct mem_sharing_op_fork { /* OP_FORK */ + struct mem_sharing_op_fork { /* OP_FORK{,_RESET} */ domid_t parent_domain; /* IN: parent's domain id */ /* Only makes sense for short-lived forks */ #define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0) /* Only makes sense for short-lived forks */ #define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1) +#define XENMEM_FORK_RESET_STATE (1u << 2) +#define XENMEM_FORK_RESET_MEMORY (1u << 3) uint16_t flags; /* IN: optional settings */ uint32_t pad; /* Must be set to 0 */ } fork; @@ -662,6 +647,13 @@ struct xen_mem_acquire_resource { * two calls. */ uint32_t nr_frames; + /* + * Padding field, must be zero on input. + * In a previous version this was an output field with the lowest bit + * named XENMEM_rsrc_acq_caller_owned. Future versions of this interface + * will not reuse this bit as an output with the field being zero on + * input. + */ uint32_t pad; /* * IN - the index of the initial frame to be mapped. This parameter diff --git a/include/hw/xen/interface/physdev.h b/include/hw/xen/interface/physdev.h index d271766ad0..f0c0d4727c 100644 --- a/include/hw/xen/interface/physdev.h +++ b/include/hw/xen/interface/physdev.h @@ -1,22 +1,5 @@ +/* SPDX-License-Identifier: MIT */ /* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
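For illustration, the two new OP_FORK reset flags added above compose as follows (a sketch under the assumption that the caller already builds the surrounding XENMEM sharing-op hypercall, which this hunk does not show):

/* Reset both vCPU state and memory of a short-lived fork. */
uint16_t fork_reset_flags = XENMEM_FORK_RESET_STATE |
                            XENMEM_FORK_RESET_MEMORY;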
- * * Copyright (c) 2006, Keir Fraser */ @@ -211,8 +194,8 @@ struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; - unsigned is_extfn; - unsigned is_virtfn; + uint32_t is_extfn; + uint32_t is_virtfn; struct { uint8_t bus; uint8_t devfn; diff --git a/include/hw/xen/interface/sched.h b/include/hw/xen/interface/sched.h index 811bd87c82..b4362c6a1d 100644 --- a/include/hw/xen/interface/sched.h +++ b/include/hw/xen/interface/sched.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * sched.h * * Scheduler state interactions * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2005, Keir Fraser <keir@xensource.com> */ diff --git a/include/hw/xen/interface/trace.h b/include/hw/xen/interface/trace.h index d5fa4aea8d..62a179971d 100644 --- a/include/hw/xen/interface/trace.h +++ b/include/hw/xen/interface/trace.h @@ -1,24 +1,7 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * include/public/trace.h * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ diff --git a/include/hw/xen/interface/vcpu.h b/include/hw/xen/interface/vcpu.h index 3623af932f..81a3b3a743 100644 --- a/include/hw/xen/interface/vcpu.h +++ b/include/hw/xen/interface/vcpu.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2005, Keir Fraser <keir@xensource.com> */ diff --git a/include/hw/xen/interface/version.h b/include/hw/xen/interface/version.h index 17a81e23cd..9c78b4f3b6 100644 --- a/include/hw/xen/interface/version.h +++ b/include/hw/xen/interface/version.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * version.h * * Xen version, type, and compile information. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com> * Copyright (c) 2005, Keir Fraser <keir@xensource.com> */ diff --git a/include/hw/xen/interface/xen-compat.h b/include/hw/xen/interface/xen-compat.h index e1c027a95c..97fe698498 100644 --- a/include/hw/xen/interface/xen-compat.h +++ b/include/hw/xen/interface/xen-compat.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * * Copyright (c) 2006, Christian Limpach */ diff --git a/include/hw/xen/interface/xen.h b/include/hw/xen/interface/xen.h index e373592c33..920567e006 100644 --- a/include/hw/xen/interface/xen.h +++ b/include/hw/xen/interface/xen.h @@ -1,26 +1,9 @@ +/* SPDX-License-Identifier: MIT */ /****************************************************************************** * xen.h * * Guest OS interface to Xen. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * * Copyright (c) 2004, K A Fraser */ diff --git a/include/hw/xen/xen-backend.h b/include/hw/xen/xen-backend.h index aac2fd454d..0f01631ae7 100644 --- a/include/hw/xen/xen-backend.h +++ b/include/hw/xen/xen-backend.h @@ -33,6 +33,7 @@ XenDevice *xen_backend_get_device(XenBackendInstance *backend); void xen_backend_register(const XenBackendInfo *info); const char **xen_backend_get_types(unsigned int *nr); +bool xen_backend_exists(const char *type, const char *name); void xen_backend_device_create(XenBus *xenbus, const char *type, const char *name, QDict *opts, Error **errp); bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp); diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h index f435898164..334ddd1ff6 100644 --- a/include/hw/xen/xen-bus.h +++ b/include/hw/xen/xen-bus.h @@ -33,6 +33,7 @@ struct XenDevice { }; typedef struct XenDevice XenDevice; +typedef char *(*XenDeviceGetFrontendPath)(XenDevice *xendev, Error **errp); typedef char *(*XenDeviceGetName)(XenDevice *xendev, Error **errp); typedef void (*XenDeviceRealize)(XenDevice *xendev, Error **errp); typedef void (*XenDeviceFrontendChanged)(XenDevice *xendev, @@ -46,6 +47,7 @@ struct XenDeviceClass { /*< public >*/ const char *backend; const char *device; + XenDeviceGetFrontendPath get_frontend_path; XenDeviceGetName get_name; XenDeviceRealize realize; XenDeviceFrontendChanged frontend_changed; @@ -73,7 +75,7 @@ struct XenBusClass { OBJECT_DECLARE_TYPE(XenBus, XenBusClass, XEN_BUS) -void xen_bus_init(void); +BusState *xen_bus_init(void); void xen_device_backend_set_state(XenDevice *xendev, enum xenbus_state state); @@ -129,5 +131,6 @@ void xen_device_notify_event_channel(XenDevice *xendev, void xen_device_unbind_event_channel(XenDevice *xendev, XenEventChannel *channel, Error **errp); +unsigned int xen_event_channel_get_local_port(XenEventChannel *channel); #endif /* HW_XEN_BUS_H */ diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h index 6c307c5f2c..fc42146bc2 100644 --- a/include/hw/xen/xen-legacy-backend.h +++ b/include/hw/xen/xen-legacy-backend.h @@ -81,7 +81,6 @@ extern struct XenDevOps xen_usb_ops; /* xen-usb.c */ /* configuration (aka xenbus setup) */ void xen_config_cleanup(void); -int xen_config_dev_blk(DriveInfo *disk); int xen_config_dev_nic(NICInfo *nic); int xen_config_dev_vfb(int vdev, const char *type); int xen_config_dev_vkbd(int vdev); diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h index 3c1fab4b1e..ef91fc28bb 100644 --- a/include/sysemu/accel-ops.h +++ b/include/sysemu/accel-ops.h @@ -30,6 +30,7 @@ struct AccelOpsClass { void (*ops_init)(AccelOpsClass *ops); bool (*cpus_are_resettable)(void); + void (*cpu_reset_hold)(CPUState *cpu); void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */ void (*kick_vcpu_thread)(CPUState *cpu); diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 80b69d88f6..d614878164 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -521,7 +521,6 @@ int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source); * Returns: 0 on success, or a negative errno on failure. */ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target); -struct ppc_radix_page_info *kvm_get_radix_page_info(void); /* Notify resamplefd for EOI of specific interrupts. 
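A hedged sketch of how a backend might wire up the new get_frontend_path class hook declared above (everything except the hook itself, including my_frontend_domid(), is a placeholder; XEN_DEVICE_CLASS comes from the xen-bus.h type declarations):

static char *my_backend_get_frontend_path(XenDevice *xendev, Error **errp)
{
    /* Hypothetical: derive a non-default frontend path for this device. */
    return g_strdup_printf("/local/domain/%u/device/mydev/0",
                           my_frontend_domid(xendev));
}

static void my_backend_class_init(ObjectClass *class, void *data)
{
    XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class);

    xendev_class->get_frontend_path = my_backend_get_frontend_path;
}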
*/ void kvm_resample_fd_notify(int gsi); diff --git a/include/ui/console.h b/include/ui/console.h index acb61a7f15..a4a49ffc64 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -462,12 +462,14 @@ struct QemuDisplay { DisplayType type; void (*early_init)(DisplayOptions *opts); void (*init)(DisplayState *ds, DisplayOptions *opts); + const char *vc; }; void qemu_display_register(QemuDisplay *ui); bool qemu_display_find_default(DisplayOptions *opts); void qemu_display_early_init(DisplayOptions *opts); void qemu_display_init(DisplayState *ds, DisplayOptions *opts); +const char *qemu_display_get_vc(DisplayOptions *opts); void qemu_display_help(void); /* vnc.c */ diff --git a/include/ui/pixman-minimal.h b/include/ui/pixman-minimal.h new file mode 100644 index 0000000000..efcf570c9e --- /dev/null +++ b/include/ui/pixman-minimal.h @@ -0,0 +1,195 @@ +/* + * SPDX-License-Identifier: MIT + * + * Tiny subset of PIXMAN API commonly used by QEMU. + * + * Copyright 1987, 1988, 1989, 1998 The Open Group + * Copyright 1987, 1988, 1989 Digital Equipment Corporation + * Copyright 1999, 2004, 2008 Keith Packard + * Copyright 2000 SuSE, Inc. + * Copyright 2000 Keith Packard, member of The XFree86 Project, Inc. + * Copyright 2004, 2005, 2007, 2008, 2009, 2010 Red Hat, Inc. + * Copyright 2004 Nicholas Miell + * Copyright 2005 Lars Knoll & Zack Rusin, Trolltech + * Copyright 2005 Trolltech AS + * Copyright 2007 Luca Barbato + * Copyright 2008 Aaron Plattner, NVIDIA Corporation + * Copyright 2008 Rodrigo Kumpera + * Copyright 2008 André Tupinambá + * Copyright 2008 Mozilla Corporation + * Copyright 2008 Frederic Plourde + * Copyright 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright 2009, 2010 Nokia Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef PIXMAN_MINIMAL_H +#define PIXMAN_MINIMAL_H + +#define PIXMAN_TYPE_OTHER 0 +#define PIXMAN_TYPE_ARGB 2 +#define PIXMAN_TYPE_ABGR 3 +#define PIXMAN_TYPE_BGRA 8 +#define PIXMAN_TYPE_RGBA 9 + +#define PIXMAN_FORMAT(bpp, type, a, r, g, b) (((bpp) << 24) | \ + ((type) << 16) | \ + ((a) << 12) | \ + ((r) << 8) | \ + ((g) << 4) | \ + ((b))) + +#define PIXMAN_FORMAT_RESHIFT(val, ofs, num) \ + (((val >> (ofs)) & ((1 << (num)) - 1)) << ((val >> 22) & 3)) + +#define PIXMAN_FORMAT_BPP(f) PIXMAN_FORMAT_RESHIFT(f, 24, 8) +#define PIXMAN_FORMAT_TYPE(f) (((f) >> 16) & 0x3f) +#define PIXMAN_FORMAT_A(f) PIXMAN_FORMAT_RESHIFT(f, 12, 4) +#define PIXMAN_FORMAT_R(f) PIXMAN_FORMAT_RESHIFT(f, 8, 4) +#define PIXMAN_FORMAT_G(f) PIXMAN_FORMAT_RESHIFT(f, 4, 4) +#define PIXMAN_FORMAT_B(f) PIXMAN_FORMAT_RESHIFT(f, 0, 4) +#define PIXMAN_FORMAT_DEPTH(f) (PIXMAN_FORMAT_A(f) + \ + PIXMAN_FORMAT_R(f) + \ + PIXMAN_FORMAT_G(f) + \ + PIXMAN_FORMAT_B(f)) + +typedef enum { + /* 32bpp formats */ + PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ARGB, 8, 8, 8, 8), + PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ARGB, 0, 8, 8, 8), + PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ABGR, 8, 8, 8, 8), + PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ABGR, 0, 8, 8, 8), + PIXMAN_b8g8r8a8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_BGRA, 8, 8, 8, 8), + PIXMAN_b8g8r8x8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_BGRA, 0, 8, 8, 8), + PIXMAN_r8g8b8a8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_RGBA, 8, 8, 8, 8), + PIXMAN_r8g8b8x8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_RGBA, 0, 8, 8, 8), + /* 24bpp formats */ + PIXMAN_r8g8b8 = PIXMAN_FORMAT(24, PIXMAN_TYPE_ARGB, 0, 8, 8, 8), + PIXMAN_b8g8r8 = PIXMAN_FORMAT(24, PIXMAN_TYPE_ABGR, 0, 8, 8, 8), + /* 16bpp formats */ + PIXMAN_r5g6b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 0, 5, 6, 5), + PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 1, 5, 5, 5), + PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 0, 5, 5, 5), +} pixman_format_code_t; + +typedef struct pixman_image pixman_image_t; + +typedef void (*pixman_image_destroy_func_t)(pixman_image_t *image, void *data); + +struct pixman_image { + int ref_count; + pixman_format_code_t format; + int width; + int height; + int stride; + uint32_t *data; + uint32_t *free_me; + pixman_image_destroy_func_t destroy_func; + void *destroy_data; +}; + +typedef struct pixman_color { + uint16_t red; + uint16_t green; + uint16_t blue; + uint16_t alpha; +} pixman_color_t; + +static inline pixman_image_t *pixman_image_create_bits(pixman_format_code_t format, + int width, + int height, + uint32_t *bits, + int rowstride_bytes) +{ + pixman_image_t *i = g_new0(pixman_image_t, 1); + + i->width = width; + i->height = height; + i->stride = rowstride_bytes ?: width * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8); + i->format = format; + if (bits) { + i->data = bits; + } else { + i->free_me = i->data = g_malloc0(i->stride * height); + } + i->ref_count = 1; + + return i; +} + +static inline pixman_image_t *pixman_image_ref(pixman_image_t *i) +{ + i->ref_count++; + return i; +} + +static inline bool pixman_image_unref(pixman_image_t *i) +{ + i->ref_count--; + + if (i->ref_count == 0) { + if (i->destroy_func) { + i->destroy_func(i, i->destroy_data); + } + g_free(i->free_me); + g_free(i); + + return true; + } + + return false; +} + +static inline void pixman_image_set_destroy_function(pixman_image_t *i, + pixman_image_destroy_func_t func, + void *data) + +{ + i->destroy_func = func; + i->destroy_data = data; +} + +static inline uint32_t *pixman_image_get_data(pixman_image_t *i) +{ + return
i->data; } + +static inline int pixman_image_get_height(pixman_image_t *i) +{ + return i->height; +} + +static inline int pixman_image_get_width(pixman_image_t *i) +{ + return i->width; +} + +static inline int pixman_image_get_stride(pixman_image_t *i) +{ + return i->stride; +} + +static inline pixman_format_code_t pixman_image_get_format(pixman_image_t *i) +{ + return i->format; +} + +#endif /* PIXMAN_MINIMAL_H */ diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h index e587c48b1f..ef13a8210c 100644 --- a/include/ui/qemu-pixman.h +++ b/include/ui/qemu-pixman.h @@ -6,11 +6,11 @@ #ifndef QEMU_PIXMAN_H #define QEMU_PIXMAN_H -/* pixman-0.16.0 headers have a redundant declaration */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wredundant-decls" +#ifdef CONFIG_PIXMAN #include <pixman.h> -#pragma GCC diagnostic pop +#else +#include "pixman-minimal.h" +#endif /* * pixman image formats are defined to be native endian, @@ -74,17 +74,17 @@ pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian); pixman_format_code_t qemu_drm_format_to_pixman(uint32_t drm_format); uint32_t qemu_pixman_to_drm_format(pixman_format_code_t pixman); int qemu_pixman_get_type(int rshift, int gshift, int bshift); -pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf); bool qemu_pixman_check_format(DisplayChangeListener *dcl, pixman_format_code_t format); +#ifdef CONFIG_PIXMAN +pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf); pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format, int width); void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb, int width, int x, int y); pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format, pixman_image_t *image); -void qemu_pixman_image_unref(pixman_image_t *image); pixman_image_t *qemu_pixman_glyph_from_vgafont(int height, const uint8_t *font, unsigned int ch); @@ -93,6 +93,9 @@ void qemu_pixman_glyph_render(pixman_image_t *glyph, pixman_color_t *fgcol, pixman_color_t *bgcol, int x, int y, int cw, int ch); +#endif + +void qemu_pixman_image_unref(pixman_image_t *image); G_DEFINE_AUTOPTR_CLEANUP_FUNC(pixman_image_t, qemu_pixman_image_unref) diff --git a/include/ui/rect.h b/include/ui/rect.h new file mode 100644 index 0000000000..94898f92d0 --- /dev/null +++ b/include/ui/rect.h @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef QEMU_RECT_H +#define QEMU_RECT_H + +#include <stdint.h> +#include <stdbool.h> + +typedef struct QemuRect { + int16_t x; + int16_t y; + uint16_t width; + uint16_t height; +} QemuRect; + +static inline void qemu_rect_init(QemuRect *rect, + int16_t x, int16_t y, + uint16_t width, uint16_t height) +{ + rect->x = x; + rect->y = y; + rect->width = width; + rect->height = height; +} + +static inline void qemu_rect_translate(QemuRect *rect, + int16_t dx, int16_t dy) +{ + rect->x += dx; + rect->y += dy; +} + +static inline bool qemu_rect_intersect(const QemuRect *a, const QemuRect *b, + QemuRect *res) +{ + int16_t x1, x2, y1, y2; + + x1 = MAX(a->x, b->x); + y1 = MAX(a->y, b->y); + x2 = MIN(a->x + a->width, b->x + b->width); + y2 = MIN(a->y + a->height, b->y + b->height); + + if (x1 >= x2 || y1 >= y2) { + if (res) { + qemu_rect_init(res, 0, 0, 0, 0); + } + + return false; + } + + if (res) { + qemu_rect_init(res, x1, y1, x2 - x1, y2 - y1); + } + + return true; +} + +#endif diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c index 8ab1335106..d5232f37fe 100644 --- a/linux-user/hppa/cpu_loop.c +++ 
b/linux-user/hppa/cpu_loop.c @@ -147,12 +147,10 @@ void cpu_loop(CPUHPPAState *env) force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, env->iaoq_f); break; case EXCP_ILL: - EXCP_DUMP(env, "qemu: EXCP_ILL exception %#x\n", trapnr); force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f); break; case EXCP_PRIV_OPR: /* check for glibc ABORT_INSTRUCTION "iitlbp %r0,(%sr0, %r0)" */ - EXCP_DUMP(env, "qemu: EXCP_PRIV_OPR exception %#x\n", trapnr); if (env->cr[CR_IIR] == 0x04000000) { force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->iaoq_f); } else { @@ -160,7 +158,6 @@ void cpu_loop(CPUHPPAState *env) } break; case EXCP_PRIV_REG: - EXCP_DUMP(env, "qemu: EXCP_PRIV_REG exception %#x\n", trapnr); force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVREG, env->iaoq_f); break; case EXCP_OVERFLOW: @@ -173,7 +170,6 @@ void cpu_loop(CPUHPPAState *env) force_sig_fault(TARGET_SIGFPE, 0, env->iaoq_f); break; case EXCP_BREAK: - EXCP_DUMP(env, "qemu: EXCP_BREAK exception %#x\n", trapnr); force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f & ~3); break; case EXCP_DEBUG: diff --git a/linux-user/hppa/signal.c b/linux-user/hppa/signal.c index 17920e9ceb..d08a97dae6 100644 --- a/linux-user/hppa/signal.c +++ b/linux-user/hppa/signal.c @@ -86,7 +86,7 @@ static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env) static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc) { - target_ulong psw; + abi_ulong psw; int i; __get_user(psw, &sc->sc_gr[0]); @@ -150,10 +150,10 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, haddr = ka->_sa_handler; if (haddr & 2) { /* Function descriptor. */ - target_ulong *fdesc, dest; + abi_ptr *fdesc, dest; haddr &= -4; - fdesc = lock_user(VERIFY_READ, haddr, 2 * sizeof(target_ulong), 1); + fdesc = lock_user(VERIFY_READ, haddr, 2 * sizeof(abi_ptr), 1); if (!fdesc) { goto give_sigsegv; } diff --git a/linux-user/hppa/target_elf.h b/linux-user/hppa/target_elf.h index 82b4e9535e..19cae8bd65 100644 --- a/linux-user/hppa/target_elf.h +++ b/linux-user/hppa/target_elf.h @@ -9,6 +9,6 @@ #define HPPA_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "any"; + return "hppa"; } #endif diff --git a/linux-user/main.c b/linux-user/main.c index 0c23584a96..0cdaf30d34 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -156,7 +156,7 @@ void fork_end(int child) Discard information about the parent threads. 
*/ CPU_FOREACH_SAFE(cpu, next_cpu) { if (cpu != thread_cpu) { - QTAILQ_REMOVE_RCU(&cpus, cpu, node); + QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node); } } qemu_init_cpu_list(); diff --git a/meson.build b/meson.build index 51a51075db..4848930680 100644 --- a/meson.build +++ b/meson.build @@ -813,10 +813,11 @@ if 'ust' in get_option('trace_backends') method: 'pkg-config') endif pixman = not_found -if have_system or have_tools - pixman = dependency('pixman-1', required: have_system, version:'>=0.21.8', +if not get_option('pixman').auto() or have_system or have_tools + pixman = dependency('pixman-1', required: get_option('pixman'), version:'>=0.21.8', method: 'pkg-config') endif + zlib = dependency('zlib', required: true) libaio = not_found @@ -1011,7 +1012,11 @@ if not get_option('spice_protocol').auto() or have_system method: 'pkg-config') endif spice = not_found -if not get_option('spice').auto() or have_system +if get_option('spice') \ + .disable_auto_if(not have_system) \ + .require(pixman.found(), + error_message: 'cannot enable SPICE if pixman is not available') \ + .allowed() spice = dependency('spice-server', version: '>=0.14.0', required: get_option('spice'), method: 'pkg-config') @@ -1523,7 +1528,11 @@ gtkx11 = not_found vte = not_found have_gtk_clipboard = get_option('gtk_clipboard').enabled() -if not get_option('gtk').auto() or have_system +if get_option('gtk') \ + .disable_auto_if(not have_system) \ + .require(pixman.found(), + error_message: 'cannot enable GTK if pixman is not available') \ + .allowed() gtk = dependency('gtk+-3.0', version: '>=3.22.0', method: 'pkg-config', required: get_option('gtk')) @@ -1556,7 +1565,11 @@ endif vnc = not_found jpeg = not_found sasl = not_found -if get_option('vnc').allowed() and have_system +if get_option('vnc') \ + .disable_auto_if(not have_system) \ + .require(pixman.found(), + error_message: 'cannot enable VNC if pixman is not available') \ + .allowed() vnc = declare_dependency() # dummy dependency jpeg = dependency('libjpeg', required: get_option('vnc_jpeg'), method: 'pkg-config') @@ -2149,6 +2162,7 @@ config_host_data.set('CONFIG_SECCOMP', seccomp.found()) if seccomp.found() config_host_data.set('CONFIG_SECCOMP_SYSRAWRC', seccomp_has_sysrawrc) endif +config_host_data.set('CONFIG_PIXMAN', pixman.found()) config_host_data.set('CONFIG_SNAPPY', snappy.found()) config_host_data.set('CONFIG_SOLARIS', targetos == 'sunos') if get_option('tcg').allowed() @@ -2868,6 +2882,7 @@ have_ivshmem = config_host_data.get('CONFIG_EVENTFD') host_kconfig = \ (get_option('fuzzing') ? ['CONFIG_FUZZ=y'] : []) + \ (have_tpm ? ['CONFIG_TPM=y'] : []) + \ + (pixman.found() ? ['CONFIG_PIXMAN=y'] : []) + \ (spice.found() ? ['CONFIG_SPICE=y'] : []) + \ (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ (opengl.found() ? 
['CONFIG_OPENGL=y'] : []) + \ diff --git a/meson_options.txt b/meson_options.txt index 5c212fcd45..c9baeda639 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -226,6 +226,8 @@ option('l2tpv3', type : 'feature', value : 'auto', description: 'l2tpv3 network backend support') option('netmap', type : 'feature', value : 'auto', description: 'netmap network backend support') +option('pixman', type : 'feature', value : 'auto', + description: 'pixman support') option('slirp', type: 'feature', value: 'auto', description: 'libslirp user mode network backend support') option('vde', type : 'feature', value : 'auto', diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 7a226c93bc..d0614d7954 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -121,6 +121,8 @@ static const uint64_t vdpa_svq_device_features = BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | /* VHOST_F_LOG_ALL is exposed by SVQ */ BIT_ULL(VHOST_F_LOG_ALL) | + BIT_ULL(VIRTIO_NET_F_HASH_REPORT) | + BIT_ULL(VIRTIO_NET_F_RSS) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_STANDBY) | BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX); @@ -240,6 +242,12 @@ static void vhost_vdpa_cleanup(NetClientState *nc) } } +/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */ +static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd) +{ + return true; +} + static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc) { assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); @@ -422,6 +430,7 @@ static NetClientInfo net_vhost_vdpa_info = { .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, .has_ufo = vhost_vdpa_has_ufo, .check_peer_type = vhost_vdpa_check_peer_type, + .set_steering_ebpf = vhost_vdpa_set_steering_ebpf, }; static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index, @@ -818,6 +827,103 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n, return 0; } +static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n, + struct iovec *out_cursor, + struct iovec *in_cursor, bool do_rss) +{ + struct virtio_net_rss_config cfg = {}; + ssize_t r; + g_autofree uint16_t *table = NULL; + + /* + * According to VirtIO standard, "Initially the device has all hash + * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.". + * + * Therefore, there is no need to send this CVQ command if the + * driver disables all hash types, which aligns with + * the device's defaults. + * + * Note that the device's defaults can differ from the driver's + * configuration only at live migration. + */ + if (!n->rss_data.enabled || + n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) { + return 0; + } + + table = g_malloc_n(n->rss_data.indirections_len, + sizeof(n->rss_data.indirections_table[0])); + cfg.hash_types = cpu_to_le32(n->rss_data.hash_types); + + if (do_rss) { + /* + * According to VirtIO standard, "Number of entries in indirection_table + * is (indirection_table_mask + 1)". + */ + cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len - + 1); + cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue); + for (int i = 0; i < n->rss_data.indirections_len; ++i) { + table[i] = cpu_to_le16(n->rss_data.indirections_table[i]); + } + cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs); + } else { + /* + * According to VirtIO standard, "Field reserved MUST contain zeroes. + * It is defined to make the structure to match the layout of + * virtio_net_rss_config structure, defined in 5.1.6.5.7.". 
+ * + * Therefore, we need to zero the fields in + * struct virtio_net_rss_config, which correspond to the + * `reserved` field in struct virtio_net_hash_config. + * + * Note that all other fields are zeroed at their definitions, + * except for the `indirection_table` field, where the actual data + * is stored in the `table` variable to ensure compatibility + * with the RSS case. Therefore, we need to zero the `table` variable here. + */ + table[0] = 0; + } + + /* + * Since virtio_net_handle_rss() currently does not restore + * the hash key length parsed from the CVQ command sent from the guest + * into n->rss_data, and uses the maximum key length in other code, + * we also employ the maximum key length here. + */ + cfg.hash_key_length = sizeof(n->rss_data.key); + + const struct iovec data[] = { + { + .iov_base = &cfg, + .iov_len = offsetof(struct virtio_net_rss_config, + indirection_table), + }, { + .iov_base = table, + .iov_len = n->rss_data.indirections_len * + sizeof(n->rss_data.indirections_table[0]), + }, { + .iov_base = &cfg.max_tx_vq, + .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) - + offsetof(struct virtio_net_rss_config, max_tx_vq), + }, { + .iov_base = (void *)n->rss_data.key, + .iov_len = sizeof(n->rss_data.key), + } + }; + + r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, + VIRTIO_NET_CTRL_MQ, + do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG : + VIRTIO_NET_CTRL_MQ_HASH_CONFIG, + data, ARRAY_SIZE(data)); + if (unlikely(r < 0)) { + return r; + } + + return 0; +} + static int vhost_vdpa_net_load_mq(VhostVDPAState *s, const VirtIONet *n, struct iovec *out_cursor, @@ -843,6 +949,21 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s, return r; } + if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) { + /* load the receive-side scaling state */ + r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true); + if (unlikely(r < 0)) { + return r; + } + } else if (virtio_vdev_has_feature(&n->parent_obj, + VIRTIO_NET_F_HASH_REPORT)) { + /* load the hash calculation state */ + r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false); + if (unlikely(r < 0)) { + return r; + } + } + return 0; } @@ -1166,6 +1287,7 @@ static NetClientInfo net_vhost_vdpa_cvq_info = { .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, .has_ufo = vhost_vdpa_has_ufo, .check_peer_type = vhost_vdpa_check_peer_type, + .set_steering_ebpf = vhost_vdpa_set_steering_ebpf, }; /* diff --git a/plugins/core.c b/plugins/core.c index fcd33a2bff..49588285dd 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -21,7 +21,6 @@ #include "qemu/xxhash.h" #include "qemu/rcu.h" #include "hw/core/cpu.h" -#include "exec/cpu-common.h" #include "exec/exec-all.h" #include "exec/tb-flush.h" diff --git a/qapi/ui.json b/qapi/ui.json index 006616aa77..a0158baf23 100644 --- a/qapi/ui.json +++ b/qapi/ui.json @@ -200,7 +200,8 @@ { 'command': 'screendump', 'data': {'filename': 'str', '*device': 'str', '*head': 'int', '*format': 'ImageFormat'}, - 'coroutine': true } + 'coroutine': true, + 'if': 'CONFIG_PIXMAN' } ## # == Spice @@ -1409,13 +1410,18 @@ # codes match their position on non-Mac keyboards and you can use # Meta/Super and Alt where you expect them. (default: off) # +# @zoom-to-fit: Zoom guest display to fit into the host window. When +# turned off the host window will be resized instead. Defaults to +# "off". 
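The four-element iovec assembled in vhost_vdpa_net_load_rss() above mirrors the wire format of the CVQ command. The indirection table in the middle is variable-length, so the C struct cannot be shipped as one blob; a sketch of the layout being carved up with offsetof() (field offsets are notional, the authoritative definition is struct virtio_net_rss_config in the VirtIO spec and the standard Linux headers):

/* Editorial sketch, not the real header. */
#include <stdint.h>

struct rss_config_wire_sketch {
    uint32_t hash_types;              /* iov[0]: head of the struct,     */
    uint16_t indirection_table_mask;  /*         up to indirection_table */
    uint16_t unclassified_queue;
    uint16_t indirection_table[];     /* iov[1]: mask + 1 entries        */
    /* uint16_t max_tx_vq;               iov[2]: tail fields behind      */
    /* uint8_t  hash_key_length;                 the table               */
    /* uint8_t  hash_key_data[40];       iov[3]: the hash key itself     */
};

For VIRTIO_NET_CTRL_MQ_HASH_CONFIG the same buffers are reused with a single zeroed table entry standing in for the reserved field, which is what keeps the assembly code common to both commands.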
(Since 8.2) +# # Since: 7.0 ## { 'struct': 'DisplayCocoa', 'data': { '*left-command-key': 'bool', '*full-grab': 'bool', - '*swap-opt-cmd': 'bool' + '*swap-opt-cmd': 'bool', + '*zoom-to-fit': 'bool' } } ## diff --git a/qemu-options.hx b/qemu-options.hx index 7809036d8c..42fd09e4de 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -2428,8 +2428,10 @@ SRST OBP. ERST +#ifdef CONFIG_VNC DEF("vnc", HAS_ARG, QEMU_OPTION_vnc , "-vnc <display> shorthand for -display vnc=<display>\n", QEMU_ARCH_ALL) +#endif SRST ``-vnc display[,option[,option[,...]]]`` Normally, if QEMU is compiled with graphical window support, it @@ -3986,14 +3988,22 @@ ERST DEF("initrd", HAS_ARG, QEMU_OPTION_initrd, \ "-initrd file use 'file' as initial ram disk\n", QEMU_ARCH_ALL) SRST + ``-initrd file`` Use file as initial ram disk. ``-initrd "file1 arg=foo,file2"`` This syntax is only available with multiboot. - Use file1 and file2 as modules and pass arg=foo as parameter to the - first module. + Use file1 and file2 as modules and pass ``arg=foo`` as parameter to the + first module. Commas can be provided in module parameters by doubling + them on the command line to escape them: + +``-initrd "bzImage earlyprintk=xen,,keep root=/dev/xvda1,initrd.img"`` + Multiboot only. Use bzImage as the first module with + "``earlyprintk=xen,keep root=/dev/xvda1``" as its command line, + and initrd.img as the second module. + ERST DEF("dtb", HAS_ARG, QEMU_OPTION_dtb, \ diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh index e9d6d39279..680fa3f581 100644 --- a/scripts/meson-buildoptions.sh +++ b/scripts/meson-buildoptions.sh @@ -160,6 +160,7 @@ meson_options_help() { printf "%s\n" ' pa PulseAudio sound support' printf "%s\n" ' parallels parallels image format support' printf "%s\n" ' pipewire PipeWire sound support' + printf "%s\n" ' pixman pixman support' printf "%s\n" ' plugins TCG plugins via shared library loading' printf "%s\n" ' png PNG support with libpng' printf "%s\n" ' pvrdma Enable PVRDMA support' @@ -419,6 +420,8 @@ _meson_option_parse() { --disable-parallels) printf "%s" -Dparallels=disabled ;; --enable-pipewire) printf "%s" -Dpipewire=enabled ;; --disable-pipewire) printf "%s" -Dpipewire=disabled ;; + --enable-pixman) printf "%s" -Dpixman=enabled ;; + --disable-pixman) printf "%s" -Dpixman=disabled ;; --with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;; --enable-plugins) printf "%s" -Dplugins=true ;; --disable-plugins) printf "%s" -Dplugins=false ;; diff --git a/system/cpus.c b/system/cpus.c index 0848e0dbdb..a444a747f0 100644 --- a/system/cpus.c +++ b/system/cpus.c @@ -201,6 +201,13 @@ bool cpus_are_resettable(void) return true; } +void cpu_exec_reset_hold(CPUState *cpu) +{ + if (cpus_accel->cpu_reset_hold) { + cpus_accel->cpu_reset_hold(cpu); + } +} + int64_t cpus_get_virtual_clock(void) { /* @@ -624,7 +631,7 @@ void qemu_init_vcpu(CPUState *cpu) { MachineState *ms = MACHINE(qdev_get_machine()); - cpu->nr_cores = ms->smp.cores; + cpu->nr_cores = machine_topo_get_cores_per_socket(ms); cpu->nr_threads = ms->smp.threads; cpu->stopped = true; cpu->random_seed = qemu_guest_random_seed_thread_part1(); diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c index 1b8005ae55..a13db763e5 100644 --- a/system/qdev-monitor.c +++ b/system/qdev-monitor.c @@ -111,6 +111,8 @@ static const QDevAlias qdev_alias_table[] = { { "virtio-serial-device", "virtio-serial", QEMU_ARCH_VIRTIO_MMIO }, { "virtio-serial-ccw", "virtio-serial", QEMU_ARCH_VIRTIO_CCW }, { "virtio-serial-pci", "virtio-serial", QEMU_ARCH_VIRTIO_PCI}, + 
{ "virtio-sound-device", "virtio-sound", QEMU_ARCH_VIRTIO_MMIO }, + { "virtio-sound-pci", "virtio-sound", QEMU_ARCH_VIRTIO_PCI }, { "virtio-tablet-device", "virtio-tablet", QEMU_ARCH_VIRTIO_MMIO }, { "virtio-tablet-ccw", "virtio-tablet", QEMU_ARCH_VIRTIO_CCW }, { "virtio-tablet-pci", "virtio-tablet", QEMU_ARCH_VIRTIO_PCI }, diff --git a/system/vl.c b/system/vl.c index 3fb569254a..5af7ced2a1 100644 --- a/system/vl.c +++ b/system/vl.c @@ -194,7 +194,7 @@ static int default_sdcard = 1; static int default_vga = 1; static int default_net = 1; -static struct { +static const struct { const char *driver; int *flag; } default_list[] = { @@ -1095,13 +1095,14 @@ DisplayOptions *qmp_query_display_options(Error **errp) static void parse_display(const char *p) { - const char *opts; - if (is_help_option(p)) { qemu_display_help(); exit(0); } +#ifdef CONFIG_VNC + const char *opts; + if (strstart(p, "vnc", &opts)) { /* * vnc isn't a (local) DisplayType but a protocol for remote @@ -1113,9 +1114,11 @@ static void parse_display(const char *p) error_report("VNC requires a display argument vnc=<display>"); exit(1); } - } else { - parse_display_qapi(p); + return; } +#endif + + parse_display_qapi(p); } static inline bool nonempty_str(const char *str) @@ -1349,9 +1352,27 @@ static void qemu_disable_default_devices(void) } } +static void qemu_setup_display(void) +{ + if (dpy.type == DISPLAY_TYPE_DEFAULT && !display_remote) { + if (!qemu_display_find_default(&dpy)) { + dpy.type = DISPLAY_TYPE_NONE; +#if defined(CONFIG_VNC) + vnc_parse("localhost:0,to=99,id=default"); +#endif + } + } + if (dpy.type == DISPLAY_TYPE_DEFAULT) { + dpy.type = DISPLAY_TYPE_NONE; + } + + qemu_display_early_init(&dpy); +} + static void qemu_create_default_devices(void) { MachineClass *machine_class = MACHINE_GET_CLASS(current_machine); + const char *vc = qemu_display_get_vc(&dpy); if (is_daemonized()) { /* According to documentation and historically, -nographic redirects @@ -1370,24 +1391,30 @@ static void qemu_create_default_devices(void) } } - if (nographic) { - if (default_parallel) + if (nographic || (!vc && !is_daemonized() && isatty(STDOUT_FILENO))) { + if (default_parallel) { add_device_config(DEV_PARALLEL, "null"); + } if (default_serial && default_monitor) { add_device_config(DEV_SERIAL, "mon:stdio"); } else { - if (default_serial) + if (default_serial) { add_device_config(DEV_SERIAL, "stdio"); - if (default_monitor) + } + if (default_monitor) { monitor_parse("stdio", "readline", false); + } } } else { - if (default_serial) - add_device_config(DEV_SERIAL, "vc:80Cx24C"); - if (default_parallel) - add_device_config(DEV_PARALLEL, "vc:80Cx24C"); - if (default_monitor) - monitor_parse("vc:80Cx24C", "readline", false); + if (default_serial) { + add_device_config(DEV_SERIAL, vc ?: "null"); + } + if (default_parallel) { + add_device_config(DEV_PARALLEL, vc ?: "null"); + } + if (default_monitor && vc) { + monitor_parse(vc, "readline", false); + } } if (default_net) { @@ -1398,23 +1425,6 @@ static void qemu_create_default_devices(void) #endif } -#if defined(CONFIG_VNC) - if (!QTAILQ_EMPTY(&(qemu_find_opts("vnc")->head))) { - display_remote++; - } -#endif - if (dpy.type == DISPLAY_TYPE_DEFAULT && !display_remote) { - if (!qemu_display_find_default(&dpy)) { - dpy.type = DISPLAY_TYPE_NONE; -#if defined(CONFIG_VNC) - vnc_parse("localhost:0,to=99,id=default"); -#endif - } - } - if (dpy.type == DISPLAY_TYPE_DEFAULT) { - dpy.type = DISPLAY_TYPE_NONE; - } - /* If no default VGA is requested, the default is "none". 
*/ if (default_vga) { vga_model = get_default_vga_model(machine_class); @@ -1529,7 +1539,8 @@ static gint machine_class_cmp(gconstpointer a, gconstpointer b) static void machine_help_func(const QDict *qdict) { - GSList *machines, *el; + g_autoptr(GSList) machines = NULL; + GSList *el; const char *type = qdict_get_try_str(qdict, "type"); machines = object_class_get_list(TYPE_MACHINE, false); @@ -1939,7 +1950,6 @@ static void qemu_create_early_backends(void) "ignoring option"); } - qemu_display_early_init(&dpy); qemu_console_early_init(); if (dpy.has_gl && dpy.gl != DISPLAYGL_MODE_OFF && display_opengl == 0) { @@ -3344,9 +3354,12 @@ void qemu_init(int argc, char **argv) machine_parse_property_opt(qemu_find_opts("smp-opts"), "smp", optarg); break; +#ifdef CONFIG_VNC case QEMU_OPTION_vnc: vnc_parse(optarg); + display_remote++; break; +#endif case QEMU_OPTION_no_acpi: warn_report("-no-acpi is deprecated, use '-machine acpi=off' instead"); qdict_put_str(machine_opts_dict, "acpi", "off"); @@ -3475,12 +3488,7 @@ void qemu_init(int argc, char **argv) break; #ifdef CONFIG_SPICE case QEMU_OPTION_spice: - olist = qemu_find_opts_err("spice", NULL); - if (!olist) { - error_report("spice support is disabled"); - exit(1); - } - opts = qemu_opts_parse_noisily(olist, optarg, false); + opts = qemu_opts_parse_noisily(qemu_find_opts("spice"), optarg, false); if (!opts) { exit(1); } @@ -3670,6 +3678,7 @@ void qemu_init(int argc, char **argv) suspend_mux_open(); qemu_disable_default_devices(); + qemu_setup_display(); qemu_create_default_devices(); qemu_create_early_backends(); diff --git a/target/alpha/cpu-qom.h b/target/alpha/cpu-qom.h index 1f200724b6..1b32b18d34 100644 --- a/target/alpha/cpu-qom.h +++ b/target/alpha/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU Alpha CPU + * QEMU Alpha CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,27 +21,12 @@ #define QEMU_ALPHA_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_ALPHA_CPU "alpha-cpu" OBJECT_DECLARE_CPU_TYPE(AlphaCPU, AlphaCPUClass, ALPHA_CPU) -/** - * AlphaCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * - * An Alpha CPU model. - */ -struct AlphaCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - DeviceReset parent_reset; -}; - +#define ALPHA_CPU_TYPE_SUFFIX "-" TYPE_ALPHA_CPU +#define ALPHA_CPU_TYPE_NAME(model) model ALPHA_CPU_TYPE_SUFFIX #endif diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 51b7d8d1bf..39cf841b3e 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -126,8 +126,7 @@ static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model) int i; oc = object_class_by_name(cpu_model); - if (oc != NULL && object_class_dynamic_cast(oc, TYPE_ALPHA_CPU) != NULL && - !object_class_is_abstract(oc)) { + if (oc != NULL && object_class_dynamic_cast(oc, TYPE_ALPHA_CPU) != NULL) { return oc; } @@ -142,13 +141,10 @@ static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(ALPHA_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && object_class_is_abstract(oc)) { - oc = NULL; - } /* TODO: remove match everything nonsense */ - /* Default to ev67; no reason not to emulate insns by default. */ - if (!oc) { + if (!oc || object_class_is_abstract(oc)) { + /* Default to ev67; no reason not to emulate insns by default. 
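The machine_help_func() change above swaps a manually managed GSList for GLib auto-cleanup: g_autoptr(GSList) frees the list cells (g_slist_free(), not the elements) when the variable leaves scope, so the explicit free at the end of the function (not visible in this hunk) can go away and early returns stay leak-free. A minimal sketch of the pattern:

#include <glib.h>

static void list_machines_sketch(void)
{
    /* Freed automatically when the scope exits, even on early return. */
    g_autoptr(GSList) machines = NULL;

    machines = g_slist_append(machines, (gpointer)"pc");
    machines = g_slist_append(machines, (gpointer)"q35");

    for (GSList *el = machines; el; el = el->next) {
        g_print("%s\n", (const char *)el->data);
    }
}   /* g_slist_free(machines) runs here */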
*/ oc = object_class_by_name(ALPHA_CPU_TYPE_NAME("ev67")); } diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index e2a467ec17..d672e911dd 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -259,9 +259,7 @@ typedef struct CPUArchState { * An Alpha CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUAlphaState env; @@ -269,6 +267,19 @@ struct ArchCPU { QEMUTimer *alarm_timer; }; +/** + * AlphaCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * An Alpha CPU model. + */ +struct AlphaCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + DeviceReset parent_reset; +}; #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_alpha_cpu; @@ -428,8 +439,6 @@ enum { void alpha_translate_init(void); -#define ALPHA_CPU_TYPE_SUFFIX "-" TYPE_ALPHA_CPU -#define ALPHA_CPU_TYPE_NAME(model) model ALPHA_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU void alpha_cpu_list(void); diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h index d06c08a734..02b914c876 100644 --- a/target/arm/cpu-qom.h +++ b/target/arm/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU ARM CPU + * QEMU ARM CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,7 +21,6 @@ #define QEMU_ARM_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_ARM_CPU "arm-cpu" @@ -29,67 +28,9 @@ OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU) #define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU -typedef struct ARMCPUInfo { - const char *name; - void (*initfn)(Object *obj); - void (*class_init)(ObjectClass *oc, void *data); -} ARMCPUInfo; - -void arm_cpu_register(const ARMCPUInfo *info); -void aarch64_cpu_register(const ARMCPUInfo *info); - -/** - * ARMCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * An ARM CPU model. - */ -struct ARMCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - const ARMCPUInfo *info; - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; - - #define TYPE_AARCH64_CPU "aarch64-cpu" typedef struct AArch64CPUClass AArch64CPUClass; DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU, TYPE_AARCH64_CPU) -struct AArch64CPUClass { - /*< private >*/ - ARMCPUClass parent_class; - /*< public >*/ -}; - -void register_cp_regs_for_features(ARMCPU *cpu); -void init_cpreg_list(ARMCPU *cpu); - -/* Callback functions for the generic timer's timers. 
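The object_class_is_abstract() tests are dropped from the per-target class_by_name hooks here (alpha above, and the same pattern for arm, avr, cris and hexagon below), presumably because the generic CPU model resolver can apply that filter once for all targets. A sketch of what such a shared filter looks like, using only stock QOM calls (assumed shape, not quoted from the patch):

static ObjectClass *resolve_cpu_class(const char *resolving_type,
                                      const char *name)
{
    ObjectClass *oc = object_class_by_name(name);

    /* One central place to reject non-CPU and abstract classes,
     * instead of one copy in every target. */
    if (oc == NULL ||
        object_class_dynamic_cast(oc, resolving_type) == NULL ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}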
*/ -void arm_gt_ptimer_cb(void *opaque); -void arm_gt_vtimer_cb(void *opaque); -void arm_gt_htimer_cb(void *opaque); -void arm_gt_stimer_cb(void *opaque); -void arm_gt_hvtimer_cb(void *opaque); - -#define ARM_AFF0_SHIFT 0 -#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT) -#define ARM_AFF1_SHIFT 8 -#define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT) -#define ARM_AFF2_SHIFT 16 -#define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT) -#define ARM_AFF3_SHIFT 32 -#define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT) -#define ARM_DEFAULT_CPUS_PER_CLUSTER 8 - -#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK) -#define ARM64_AFFINITY_MASK \ - (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK|ARM_AFF3_MASK) -#define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK) - #endif diff --git a/target/arm/cpu.c b/target/arm/cpu.c index df6496b019..25e9d2ae7b 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -2401,8 +2401,7 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) || - object_class_is_abstract(oc)) { + if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU)) { return NULL; } return oc; diff --git a/target/arm/cpu.h b/target/arm/cpu.h index d51dfe48db..a0282e0d28 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -852,9 +852,7 @@ typedef struct { * An ARM CPU core. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUARMState env; @@ -1118,11 +1116,58 @@ struct ArchCPU { uint64_t gt_cntfrq_hz; }; +typedef struct ARMCPUInfo { + const char *name; + void (*initfn)(Object *obj); + void (*class_init)(ObjectClass *oc, void *data); +} ARMCPUInfo; + +/** + * ARMCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * An ARM CPU model. + */ +struct ARMCPUClass { + CPUClass parent_class; + + const ARMCPUInfo *info; + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; + +struct AArch64CPUClass { + ARMCPUClass parent_class; +}; + +/* Callback functions for the generic timer's timers. */ +void arm_gt_ptimer_cb(void *opaque); +void arm_gt_vtimer_cb(void *opaque); +void arm_gt_htimer_cb(void *opaque); +void arm_gt_stimer_cb(void *opaque); +void arm_gt_hvtimer_cb(void *opaque); + unsigned int gt_cntfrq_period_ns(ARMCPU *cpu); void gt_rme_post_el_change(ARMCPU *cpu, void *opaque); void arm_cpu_post_init(Object *obj); +#define ARM_AFF0_SHIFT 0 +#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT) +#define ARM_AFF1_SHIFT 8 +#define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT) +#define ARM_AFF2_SHIFT 16 +#define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT) +#define ARM_AFF3_SHIFT 32 +#define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT) +#define ARM_DEFAULT_CPUS_PER_CLUSTER 8 + +#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK | ARM_AFF1_MASK | ARM_AFF2_MASK) +#define ARM64_AFFINITY_MASK \ + (ARM_AFF0_MASK | ARM_AFF1_MASK | ARM_AFF2_MASK | ARM_AFF3_MASK) +#define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK) + uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz); #ifndef CONFIG_USER_ONLY diff --git a/target/arm/internals.h b/target/arm/internals.h index c837506e44..143d57c0fe 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -183,6 +183,12 @@ static inline int r14_bank_number(int mode) return (mode == ARM_CPU_MODE_HYP) ? 
BANK_USRSYS : bank_number(mode); } +void arm_cpu_register(const ARMCPUInfo *info); +void aarch64_cpu_register(const ARMCPUInfo *info); + +void register_cp_regs_for_features(ARMCPU *cpu); +void init_cpreg_list(ARMCPU *cpu); + void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); void arm_translate_init(void); diff --git a/target/avr/cpu-qom.h b/target/avr/cpu-qom.h index 01ea5f160b..38dbcc0535 100644 --- a/target/avr/cpu-qom.h +++ b/target/avr/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU AVR CPU + * QEMU AVR CPU QOM header (target agnostic) * * Copyright (c) 2016-2020 Michael Rolnik * @@ -22,26 +22,12 @@ #define TARGET_AVR_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_AVR_CPU "avr-cpu" OBJECT_DECLARE_CPU_TYPE(AVRCPU, AVRCPUClass, AVR_CPU) -/** - * AVRCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A AVR CPU model. - */ -struct AVRCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; - +#define AVR_CPU_TYPE_SUFFIX "-" TYPE_AVR_CPU +#define AVR_CPU_TYPE_NAME(name) (name AVR_CPU_TYPE_SUFFIX) #endif /* TARGET_AVR_CPU_QOM_H */ diff --git a/target/avr/cpu.c b/target/avr/cpu.c index 14d8b9d1f0..44de1e18d1 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -157,8 +157,7 @@ static ObjectClass *avr_cpu_class_by_name(const char *cpu_model) ObjectClass *oc; oc = object_class_by_name(cpu_model); - if (object_class_dynamic_cast(oc, TYPE_AVR_CPU) == NULL || - object_class_is_abstract(oc)) { + if (object_class_dynamic_cast(oc, TYPE_AVR_CPU) == NULL) { oc = NULL; } return oc; diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 4ce22d8e4f..8a17862737 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -28,8 +28,6 @@ #error "AVR 8-bit does not support user mode" #endif -#define AVR_CPU_TYPE_SUFFIX "-" TYPE_AVR_CPU -#define AVR_CPU_TYPE_NAME(name) (name AVR_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_AVR_CPU #define TCG_GUEST_DEFAULT_MO 0 @@ -144,13 +142,25 @@ typedef struct CPUArchState { * A AVR CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUAVRState env; }; +/** + * AVRCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * An AVR CPU model. + */ +struct AVRCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; + extern const struct VMStateDescription vms_avr_cpu; void avr_cpu_do_interrupt(CPUState *cpu); diff --git a/target/cris/cpu-qom.h b/target/cris/cpu-qom.h index 431a1d536a..741ca97a1b 100644 --- a/target/cris/cpu-qom.h +++ b/target/cris/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU CRIS CPU + * QEMU CRIS CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,30 +21,12 @@ #define QEMU_CRIS_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_CRIS_CPU "cris-cpu" OBJECT_DECLARE_CPU_TYPE(CRISCPU, CRISCPUClass, CRIS_CPU) -/** - * CRISCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * @vr: Version Register value. - * - * A CRIS CPU model. 
- */ -struct CRISCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; - - uint32_t vr; -}; - +#define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU +#define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX) #endif diff --git a/target/cris/cpu.c b/target/cris/cpu.c index be4a44c218..675b73ac04 100644 --- a/target/cris/cpu.c +++ b/target/cris/cpu.c @@ -95,8 +95,7 @@ static ObjectClass *cris_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(CRIS_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_CRIS_CPU) || - object_class_is_abstract(oc))) { + if (oc != NULL && !object_class_dynamic_cast(oc, TYPE_CRIS_CPU)) { oc = NULL; } return oc; diff --git a/target/cris/cpu.h b/target/cris/cpu.h index 676b8e93ca..1be7f90319 100644 --- a/target/cris/cpu.h +++ b/target/cris/cpu.h @@ -174,13 +174,27 @@ typedef struct CPUArchState { * A CRIS CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUCRISState env; }; +/** + * CRISCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * @vr: Version Register value. + * + * A CRIS CPU model. + */ +struct CRISCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; + + uint32_t vr; +}; #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_cris_cpu; @@ -242,8 +256,6 @@ enum { /* CRIS uses 8k pages. */ #define MMAP_SHIFT TARGET_PAGE_BITS -#define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU -#define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_CRIS_CPU /* MMU modes definitions */ diff --git a/target/hexagon/cpu-qom.h b/target/hexagon/cpu-qom.h new file mode 100644 index 0000000000..da92fe7468 --- /dev/null +++ b/target/hexagon/cpu-qom.h @@ -0,0 +1,27 @@ +/* + * QEMU Hexagon CPU QOM header (target agnostic) + * + * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_HEXAGON_CPU_QOM_H +#define QEMU_HEXAGON_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define TYPE_HEXAGON_CPU "hexagon-cpu" + +#define HEXAGON_CPU_TYPE_SUFFIX "-" TYPE_HEXAGON_CPU +#define HEXAGON_CPU_TYPE_NAME(name) (name HEXAGON_CPU_TYPE_SUFFIX) + +#define TYPE_HEXAGON_CPU_V67 HEXAGON_CPU_TYPE_NAME("v67") +#define TYPE_HEXAGON_CPU_V68 HEXAGON_CPU_TYPE_NAME("v68") +#define TYPE_HEXAGON_CPU_V69 HEXAGON_CPU_TYPE_NAME("v69") +#define TYPE_HEXAGON_CPU_V71 HEXAGON_CPU_TYPE_NAME("v71") +#define TYPE_HEXAGON_CPU_V73 HEXAGON_CPU_TYPE_NAME("v73") + +OBJECT_DECLARE_CPU_TYPE(HexagonCPU, HexagonCPUClass, HEXAGON_CPU) + +#endif diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 1adc11b713..9d1ffc3b4b 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -63,8 +63,7 @@ static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_HEXAGON_CPU) || - object_class_is_abstract(oc)) { + if (!oc || !object_class_dynamic_cast(oc, TYPE_HEXAGON_CPU)) { return NULL; } return oc; diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 10cd1efd57..7d16083c6a 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -20,11 +20,10 @@ #include "fpu/softfloat-types.h" +#include "cpu-qom.h" #include "exec/cpu-defs.h" #include "hex_regs.h" #include "mmvec/mmvec.h" -#include "qom/object.h" -#include "hw/core/cpu.h" #include "hw/registerfields.h" #define NUM_PREGS 4 @@ -36,18 +35,8 @@ #define PRED_WRITES_MAX 5 /* 4 insns + endloop */ #define VSTORES_MAX 2 -#define TYPE_HEXAGON_CPU "hexagon-cpu" - -#define HEXAGON_CPU_TYPE_SUFFIX "-" TYPE_HEXAGON_CPU -#define HEXAGON_CPU_TYPE_NAME(name) (name HEXAGON_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_HEXAGON_CPU -#define TYPE_HEXAGON_CPU_V67 HEXAGON_CPU_TYPE_NAME("v67") -#define TYPE_HEXAGON_CPU_V68 HEXAGON_CPU_TYPE_NAME("v68") -#define TYPE_HEXAGON_CPU_V69 HEXAGON_CPU_TYPE_NAME("v69") -#define TYPE_HEXAGON_CPU_V71 HEXAGON_CPU_TYPE_NAME("v71") -#define TYPE_HEXAGON_CPU_V73 HEXAGON_CPU_TYPE_NAME("v73") - void hexagon_cpu_list(void); #define cpu_list hexagon_cpu_list @@ -127,20 +116,15 @@ typedef struct CPUArchState { VTCMStoreLog vtcm_log; } CPUHexagonState; -OBJECT_DECLARE_CPU_TYPE(HexagonCPU, HexagonCPUClass, HEXAGON_CPU) - typedef struct HexagonCPUClass { - /*< private >*/ CPUClass parent_class; - /*< public >*/ + DeviceRealize parent_realize; ResettablePhases parent_phases; } HexagonCPUClass; struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUHexagonState env; diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h index c2791ae5f2..6746869a3b 100644 --- a/target/hppa/cpu-param.h +++ b/target/hppa/cpu-param.h @@ -8,26 +8,16 @@ #ifndef HPPA_CPU_PARAM_H #define HPPA_CPU_PARAM_H -#ifdef TARGET_HPPA64 -# define TARGET_LONG_BITS 64 -# define TARGET_REGISTER_BITS 64 -# define TARGET_VIRT_ADDR_SPACE_BITS 64 -# define TARGET_PHYS_ADDR_SPACE_BITS 64 -#elif defined(CONFIG_USER_ONLY) -# define TARGET_LONG_BITS 32 -# define TARGET_REGISTER_BITS 32 -# define TARGET_VIRT_ADDR_SPACE_BITS 32 +#define TARGET_LONG_BITS 64 + +#if defined(CONFIG_USER_ONLY) && defined(TARGET_ABI32) # define TARGET_PHYS_ADDR_SPACE_BITS 32 +# define TARGET_VIRT_ADDR_SPACE_BITS 32 #else -/* - * In order to form the GVA from space:offset, - * we need a 64-bit virtual address space. 
- */ -# define TARGET_LONG_BITS 64 -# define TARGET_REGISTER_BITS 32 +# define TARGET_PHYS_ADDR_SPACE_BITS 64 # define TARGET_VIRT_ADDR_SPACE_BITS 64 -# define TARGET_PHYS_ADDR_SPACE_BITS 32 #endif + #define TARGET_PAGE_BITS 12 #endif diff --git a/target/hppa/cpu-qom.h b/target/hppa/cpu-qom.h index b96e0318c7..5c454bf543 100644 --- a/target/hppa/cpu-qom.h +++ b/target/hppa/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU HPPA CPU + * QEMU HPPA CPU QOM header (target agnostic) * * Copyright (c) 2016 Richard Henderson <rth@twiddle.net> * @@ -21,27 +21,10 @@ #define QEMU_HPPA_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_HPPA_CPU "hppa-cpu" +#define TYPE_HPPA64_CPU "hppa64-cpu" OBJECT_DECLARE_CPU_TYPE(HPPACPU, HPPACPUClass, HPPA_CPU) -/** - * HPPACPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * - * An HPPA CPU model. - */ -struct HPPACPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - DeviceReset parent_reset; -}; - - #endif diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 1644297bf8..04de1689d7 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -77,9 +77,10 @@ static void hppa_restore_state_to_opc(CPUState *cs, HPPACPU *cpu = HPPA_CPU(cs); cpu->env.iaoq_f = data[0]; - if (data[1] != (target_ureg)-1) { + if (data[1] != (target_ulong)-1) { cpu->env.iaoq_b = data[1]; } + cpu->env.unwind_breg = data[2]; /* * Since we were executing the instruction at IAOQ_F, and took some * sort of action that provoked the cpu_restore_state, we can infer @@ -137,8 +138,10 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp) #ifndef CONFIG_USER_ONLY { HPPACPU *cpu = HPPA_CPU(cs); + cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hppa_cpu_alarm_timer, cpu); + hppa_ptlbe(&cpu->env); } #endif } @@ -156,7 +159,39 @@ static void hppa_cpu_initfn(Object *obj) static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model) { - return object_class_by_name(TYPE_HPPA_CPU); + g_autofree char *typename = g_strconcat(cpu_model, "-cpu", NULL); + ObjectClass *oc = object_class_by_name(typename); + + if (oc && + !object_class_is_abstract(oc) && + object_class_dynamic_cast(oc, TYPE_HPPA_CPU)) { + return oc; + } + return NULL; +} + +static void hppa_cpu_list_entry(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CPUClass *cc = CPU_CLASS(oc); + const char *tname = object_class_get_name(oc); + g_autofree char *name = g_strndup(tname, strchr(tname, '-') - tname); + + if (cc->deprecation_note) { + qemu_printf(" %s (deprecated)\n", name); + } else { + qemu_printf(" %s\n", name); + } +} + +void hppa_cpu_list(void) +{ + GSList *list; + + list = object_class_get_list_sorted(TYPE_HPPA_CPU, false); + qemu_printf("Available CPUs:\n"); + g_slist_foreach(list, hppa_cpu_list_entry, NULL); + g_slist_free(list); } #ifndef CONFIG_USER_ONLY @@ -207,20 +242,21 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data) cc->tcg_ops = &hppa_tcg_ops; } -static const TypeInfo hppa_cpu_type_info = { - .name = TYPE_HPPA_CPU, - .parent = TYPE_CPU, - .instance_size = sizeof(HPPACPU), - .instance_align = __alignof(HPPACPU), - .instance_init = hppa_cpu_initfn, - .abstract = false, - .class_size = sizeof(HPPACPUClass), - .class_init = hppa_cpu_class_init, +static const TypeInfo hppa_cpu_type_infos[] = { + { + .name = TYPE_HPPA_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(HPPACPU), + .instance_align = __alignof(HPPACPU), + .instance_init = hppa_cpu_initfn, + 
.abstract = false, + .class_size = sizeof(HPPACPUClass), + .class_init = hppa_cpu_class_init, + }, + { + .name = TYPE_HPPA64_CPU, + .parent = TYPE_HPPA_CPU, + }, }; -static void hppa_cpu_register_types(void) -{ - type_register_static(&hppa_cpu_type_info); -} - -type_init(hppa_cpu_register_types) +DEFINE_TYPES(hppa_cpu_type_infos) diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 798d0c26d7..cecec59700 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -23,6 +23,7 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" #include "qemu/cpu-float.h" +#include "qemu/interval-tree.h" /* PA-RISC 1.x processors have a strong memory model. */ /* ??? While we do not yet implement PA-RISC 2.0, those processors have @@ -30,21 +31,33 @@ basis. It's probably easier to fall back to a strong memory model. */ #define TCG_GUEST_DEFAULT_MO TCG_MO_ALL -#define MMU_KERNEL_IDX 11 -#define MMU_PL1_IDX 12 -#define MMU_PL2_IDX 13 -#define MMU_USER_IDX 14 -#define MMU_PHYS_IDX 15 +#define MMU_KERNEL_IDX 7 +#define MMU_KERNEL_P_IDX 8 +#define MMU_PL1_IDX 9 +#define MMU_PL1_P_IDX 10 +#define MMU_PL2_IDX 11 +#define MMU_PL2_P_IDX 12 +#define MMU_USER_IDX 13 +#define MMU_USER_P_IDX 14 +#define MMU_PHYS_IDX 15 -#define PRIV_TO_MMU_IDX(priv) (MMU_KERNEL_IDX + (priv)) -#define MMU_IDX_TO_PRIV(mmu_idx) ((mmu_idx) - MMU_KERNEL_IDX) +#define MMU_IDX_TO_PRIV(MIDX) (((MIDX) - MMU_KERNEL_IDX) / 2) +#define MMU_IDX_TO_P(MIDX) (((MIDX) - MMU_KERNEL_IDX) & 1) +#define PRIV_P_TO_MMU_IDX(PRIV, P) ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX) -#define TARGET_INSN_START_EXTRA_WORDS 1 +#define TARGET_INSN_START_EXTRA_WORDS 2 /* No need to flush MMU_PHYS_IDX */ #define HPPA_MMU_FLUSH_MASK \ - (1 << MMU_KERNEL_IDX | 1 << MMU_PL1_IDX | \ - 1 << MMU_PL2_IDX | 1 << MMU_USER_IDX) + (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX | \ + 1 << MMU_PL1_IDX | 1 << MMU_PL1_P_IDX | \ + 1 << MMU_PL2_IDX | 1 << MMU_PL2_P_IDX | \ + 1 << MMU_USER_IDX | 1 << MMU_USER_P_IDX) + +/* Indices to flush for access_id changes. */ +#define HPPA_MMU_FLUSH_P_MASK \ + (1 << MMU_KERNEL_P_IDX | 1 << MMU_PL1_P_IDX | \ + 1 << MMU_PL2_P_IDX | 1 << MMU_USER_P_IDX) /* Hardware exceptions, interrupts, faults, and traps. 
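The doubled index space above interleaves a protection-ID-checked ("P") variant behind each privilege level, which is what HPPA_MMU_FLUSH_P_MASK then selects. The arithmetic can be sanity-checked directly from the macros (standalone copy, mirroring the definitions above):

#include <assert.h>

#define MMU_KERNEL_IDX 7
#define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
#define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
#define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)

/* Kernel without PSW_P is index 7, kernel with PSW_P is 8, ...,
 * user (priv 3) with PSW_P is 14, leaving 15 for MMU_PHYS_IDX. */
static_assert(PRIV_P_TO_MMU_IDX(0, 0) == 7,  "MMU_KERNEL_IDX");
static_assert(PRIV_P_TO_MMU_IDX(0, 1) == 8,  "MMU_KERNEL_P_IDX");
static_assert(PRIV_P_TO_MMU_IDX(3, 1) == 14, "MMU_USER_P_IDX");
static_assert(MMU_IDX_TO_PRIV(14) == 3, "priv round-trips");
static_assert(MMU_IDX_TO_P(14) == 1, "P bit round-trips");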
*/ #define EXCP_HPMC 1 /* high priority machine check */ @@ -107,11 +120,7 @@ #define PSW_T 0x01000000 #define PSW_S 0x02000000 #define PSW_E 0x04000000 -#ifdef TARGET_HPPA64 #define PSW_W 0x08000000 /* PA2.0 only */ -#else -#define PSW_W 0 -#endif #define PSW_Z 0x40000000 /* PA1.x only */ #define PSW_Y 0x80000000 /* PA1.x only */ @@ -124,15 +133,12 @@ #define PSW_SM_P PSW_P #define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */ #define PSW_SM_R PSW_R /* Enable Recover Counter Trap */ -#ifdef TARGET_HPPA64 #define PSW_SM_E 0x100 #define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */ -#else -#define PSW_SM_E 0 -#define PSW_SM_W 0 -#endif #define CR_RC 0 +#define CR_PSW_DEFAULT 6 /* see SeaBIOS PDC_PSW firmware call */ +#define PDC_PSW_WIDE_BIT 2 #define CR_PID1 8 #define CR_PID2 9 #define CR_PID3 12 @@ -150,45 +156,37 @@ #define CR_IPSW 22 #define CR_EIRR 23 -#if TARGET_REGISTER_BITS == 32 -typedef uint32_t target_ureg; -typedef int32_t target_sreg; -#define TREG_FMT_lx "%08"PRIx32 -#define TREG_FMT_ld "%"PRId32 -#else -typedef uint64_t target_ureg; -typedef int64_t target_sreg; -#define TREG_FMT_lx "%016"PRIx64 -#define TREG_FMT_ld "%"PRId64 -#endif +typedef struct HPPATLBEntry { + union { + IntervalTreeNode itree; + struct HPPATLBEntry *unused_next; + }; + + target_ulong pa; + + unsigned entry_valid : 1; -typedef struct { - uint64_t va_b; - uint64_t va_e; - target_ureg pa; unsigned u : 1; unsigned t : 1; unsigned d : 1; unsigned b : 1; - unsigned page_size : 4; unsigned ar_type : 3; unsigned ar_pl1 : 2; unsigned ar_pl2 : 2; - unsigned entry_valid : 1; unsigned access_id : 16; -} hppa_tlb_entry; +} HPPATLBEntry; typedef struct CPUArchState { - target_ureg iaoq_f; /* front */ - target_ureg iaoq_b; /* back, aka next instruction */ + target_ulong iaoq_f; /* front */ + target_ulong iaoq_b; /* back, aka next instruction */ - target_ureg gr[32]; + target_ulong gr[32]; uint64_t fr[32]; uint64_t sr[8]; /* stored shifted into place for gva */ - target_ureg psw; /* All psw bits except the following: */ - target_ureg psw_n; /* boolean */ - target_sreg psw_v; /* in most significant bit */ + target_ulong psw; /* All psw bits except the following: */ + target_ulong psw_n; /* boolean */ + target_long psw_v; /* in most significant bit */ /* Splitting the carry-borrow field into the MSB and "the rest", allows * for "the rest" to be deleted when it is unused, but the MSB is in use. @@ -197,8 +195,8 @@ typedef struct CPUArchState { * host has the appropriate add-with-carry insn to compute the msb). * Therefore the carry bits are stored as: cb_msb : cb & 0x11111110. */ - target_ureg psw_cb; /* in least significant bit of next nibble */ - target_ureg psw_cb_msb; /* boolean */ + target_ulong psw_cb; /* in least significant bit of next nibble */ + target_ulong psw_cb_msb; /* boolean */ uint64_t iasq_f; uint64_t iasq_b; @@ -206,24 +204,40 @@ typedef struct CPUArchState { uint32_t fr0_shadow; /* flags, c, ca/cq, rm, d, enables */ float_status fp_status; - target_ureg cr[32]; /* control registers */ - target_ureg cr_back[2]; /* back of cr17/cr18 */ - target_ureg shadow[7]; /* shadow registers */ + target_ulong cr[32]; /* control registers */ + target_ulong cr_back[2]; /* back of cr17/cr18 */ + target_ulong shadow[7]; /* shadow registers */ - /* ??? The number of entries isn't specified by the architecture. 
*/ -#ifdef TARGET_HPPA64 -#define HPPA_BTLB_FIXED 0 /* BTLBs are not supported in 64-bit machines */ -#else -#define HPPA_BTLB_FIXED 16 -#endif -#define HPPA_BTLB_VARIABLE 0 + /* + * During unwind of a memory insn, the base register of the address. + * This is used to construct CR_IOR for pa2.0. + */ + uint32_t unwind_breg; + + /* + * ??? The number of entries isn't specified by the architecture. + * BTLBs are not supported in 64-bit machines. + */ +#define PA10_BTLB_FIXED 16 +#define PA10_BTLB_VARIABLE 0 #define HPPA_TLB_ENTRIES 256 -#define HPPA_BTLB_ENTRIES (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE) - /* ??? Implement a unified itlb/dtlb for the moment. */ - /* ??? We should use a more intelligent data structure. */ - hppa_tlb_entry tlb[HPPA_TLB_ENTRIES]; + /* Index for round-robin tlb eviction. */ uint32_t tlb_last; + + /* + * For pa1.x, the partially initialized, still invalid tlb entry + * which has had ITLBA performed, but not yet ITLBP. + */ + HPPATLBEntry *tlb_partial; + + /* Linked list of all invalid (unused) tlb entries. */ + HPPATLBEntry *tlb_unused; + + /* Root of the search tree for all valid tlb entries. */ + IntervalTreeRoot tlb_root; + + HPPATLBEntry tlb[HPPA_TLB_ENTRIES]; } CPUHPPAState; /** @@ -233,23 +247,45 @@ typedef struct CPUArchState { * An HPPA CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUHPPAState env; QEMUTimer *alarm_timer; }; +/** + * HPPACPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * An HPPA CPU model. + */ +struct HPPACPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + DeviceReset parent_reset; +}; + #include "exec/cpu-all.h" +static inline bool hppa_is_pa20(CPUHPPAState *env) +{ + return object_dynamic_cast(OBJECT(env_cpu(env)), TYPE_HPPA64_CPU) != NULL; +} + +static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env) +{ + return hppa_is_pa20(env) ? 0 : PA10_BTLB_FIXED + PA10_BTLB_VARIABLE; +} + static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch) { #ifdef CONFIG_USER_ONLY return MMU_USER_IDX; #else if (env->psw & (ifetch ? PSW_C : PSW_D)) { - return PRIV_TO_MMU_IDX(env->iaoq_f & 3); + return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P); } return MMU_PHYS_IDX; /* mmu disabled */ #endif @@ -259,23 +295,26 @@ void hppa_translate_init(void); #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU -static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc, - target_ureg off) +static inline target_ulong hppa_form_gva_psw(target_ulong psw, uint64_t spc, + target_ulong off) { #ifdef CONFIG_USER_ONLY return off; #else - off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull); + off &= psw & PSW_W ? MAKE_64BIT_MASK(0, 62) : MAKE_64BIT_MASK(0, 32); return spc | off; #endif } static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc, - target_ureg off) + target_ulong off) { return hppa_form_gva_psw(env->psw, spc, off); } +hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr); +hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr); + /* * Since PSW_{I,CB} will never need to be in tb->flags, reuse them. * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the @@ -299,13 +338,12 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc, *cs_base = env->iaoq_b & -4; flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus; #else - /* ??? E, T, H, L, B, P bits need to be here, when implemented. */ - flags |= env->psw & (PSW_W | PSW_C | PSW_D); + /* ??? 
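hppa_form_gva_psw() above now always forms a 64-bit global virtual address; PSW_W only decides how much of the offset survives (62 bits wide, 32 bits narrow). A standalone sketch of the narrow case, reusing QEMU's MAKE_64BIT_MASK definition from qemu/bitops.h:

#include <stdint.h>

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/* Narrow mode: the space register supplies the upper bits (the sr[]
 * comment above says they are stored already shifted into place), and
 * only the low 32 bits of the offset survive. */
static uint64_t form_gva_narrow(uint64_t spc, uint64_t off)
{
    return spc | (off & MAKE_64BIT_MASK(0, 32));
}

/* e.g. space 0x1234 with a sign-extended 32-bit offset:
 *   form_gva_narrow(0x1234ULL << 32, 0xffffffffdeadbeefULL)
 *       == 0x00001234deadbeefULL
 */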
E, T, H, L, B bits need to be here, when implemented. */ + flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P); flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT; - *pc = (env->psw & PSW_C - ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4) - : env->iaoq_f & -4); + *pc = hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0), + env->iaoq_f & -4); *cs_base = env->iasq_f; /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero @@ -313,8 +351,8 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc, which is the primary case we care about -- using goto_tb within a page. Failure is indicated by a zero difference. */ if (env->iasq_f == env->iasq_b) { - target_sreg diff = env->iaoq_b - env->iaoq_f; - if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) { + target_long diff = env->iaoq_b - env->iaoq_f; + if (diff == (int32_t)diff) { *cs_base |= (uint32_t)diff; } } @@ -328,8 +366,8 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc, *pflags = flags; } -target_ureg cpu_hppa_get_psw(CPUHPPAState *env); -void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg); +target_ulong cpu_hppa_get_psw(CPUHPPAState *env); +void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong); void cpu_hppa_loaded_fr0(CPUHPPAState *env); #ifdef CONFIG_USER_ONLY @@ -342,6 +380,7 @@ int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void hppa_cpu_dump_state(CPUState *cs, FILE *f, int); #ifndef CONFIG_USER_ONLY +void hppa_ptlbe(CPUHPPAState *env); hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, @@ -350,7 +389,7 @@ void hppa_cpu_do_interrupt(CPUState *cpu); bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req); int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, int type, hwaddr *pphys, int *pprot, - hppa_tlb_entry **tlb_entry); + HPPATLBEntry **tlb_entry); extern const MemoryRegionOps hppa_io_eir_ops; extern const VMStateDescription vmstate_hppa_cpu; void hppa_cpu_alarm_timer(void *); @@ -358,4 +397,9 @@ int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr); #endif G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra); +#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU + +#define cpu_list hppa_cpu_list +void hppa_cpu_list(void); + #endif /* HPPA_CPU_H */ diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c index 48a514384f..4a965b38d7 100644 --- a/target/hppa/gdbstub.c +++ b/target/hppa/gdbstub.c @@ -21,11 +21,16 @@ #include "cpu.h" #include "gdbstub/helpers.h" +/* + * GDB 15 only supports PA1.0 via the remote protocol, and ignores + * any provided xml. Which means that any attempt to provide more + * data results in "Remote 'g' packet reply is too long". 
+ */ + int hppa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { - HPPACPU *cpu = HPPA_CPU(cs); - CPUHPPAState *env = &cpu->env; - target_ureg val; + CPUHPPAState *env = cpu_env(cs); + uint32_t val; switch (n) { case 0: @@ -139,24 +144,13 @@ int hppa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) break; } - if (TARGET_REGISTER_BITS == 64) { - return gdb_get_reg64(mem_buf, val); - } else { - return gdb_get_reg32(mem_buf, val); - } + return gdb_get_reg32(mem_buf, val); } int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - HPPACPU *cpu = HPPA_CPU(cs); - CPUHPPAState *env = &cpu->env; - target_ureg val; - - if (TARGET_REGISTER_BITS == 64) { - val = ldq_p(mem_buf); - } else { - val = ldl_p(mem_buf); - } + CPUHPPAState *env = cpu_env(cs); + uint32_t val = ldl_p(mem_buf); switch (n) { case 0: @@ -166,7 +160,7 @@ int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) env->gr[n] = val; break; case 32: - env->cr[CR_SAR] = val; + env->cr[CR_SAR] = val & (hppa_is_pa20(env) ? 63 : 31); break; case 33: env->iaoq_f = val; @@ -278,5 +272,5 @@ int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) } break; } - return sizeof(target_ureg); + return 4; } diff --git a/target/hppa/helper.c b/target/hppa/helper.c index a8d3f456ee..859644c47a 100644 --- a/target/hppa/helper.c +++ b/target/hppa/helper.c @@ -25,22 +25,32 @@ #include "exec/helper-proto.h" #include "qemu/qemu-print.h" -target_ureg cpu_hppa_get_psw(CPUHPPAState *env) +target_ulong cpu_hppa_get_psw(CPUHPPAState *env) { - target_ureg psw; + target_ulong psw; + target_ulong mask1 = (target_ulong)-1 / 0xf; + target_ulong maskf = (target_ulong)-1 / 0xffff * 0xf; /* Fold carry bits down to 8 consecutive bits. */ - /* ??? Needs tweaking for hppa64. */ - /* .......b...c...d...e...f...g...h */ - psw = (env->psw_cb >> 4) & 0x01111111; - /* .......b..bc..cd..de..ef..fg..gh */ + /* ^^^b^^^c^^^d^^^e^^^f^^^g^^^h^^^i^^^j^^^k^^^l^^^m^^^n^^^o^^^p^^^^ */ + psw = (env->psw_cb >> 4) & mask1; + /* .......b...c...d...e...f...g...h...i...j...k...l...m...n...o...p */ psw |= psw >> 3; - /* .............bcd............efgh */ - psw |= (psw >> 6) & 0x000f000f; - /* .........................bcdefgh */ - psw |= (psw >> 12) & 0xf; - psw |= env->psw_cb_msb << 7; - psw = (psw & 0xff) << 8; + /* .......b..bc..cd..de..ef..fg..gh..hi..ij..jk..kl..lm..mn..no..op */ + psw |= psw >> 6; + psw &= maskf; + /* .............bcd............efgh............ijkl............mnop */ + psw |= psw >> 12; + /* .............bcd.........bcdefgh........efghijkl........ijklmnop */ + psw |= env->psw_cb_msb << 39; + /* .............bcd........abcdefgh........efghijkl........ijklmnop */ + + /* For hppa64, the two 8-bit fields are discontiguous. */ + if (hppa_is_pa20(env)) { + psw = (psw & 0xff00000000ull) | ((psw & 0xff) << 8); + } else { + psw = (psw & 0xff) << 8; + } psw |= env->psw_n * PSW_N; psw |= (env->psw_v < 0) * PSW_V; @@ -49,16 +59,36 @@ target_ureg cpu_hppa_get_psw(CPUHPPAState *env) return psw; } -void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw) +void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw) { - target_ureg old_psw = env->psw; - target_ureg cb = 0; + uint64_t reserved; + target_ulong cb = 0; + + /* Do not allow reserved bits to be set. 
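The two magic constants introduced for cpu_hppa_get_psw() above are repeating bit patterns built by integer-division tricks: mask1 selects the carry bit that lives in the low bit of every nibble of psw_cb, and maskf keeps the low nibble of every 16-bit lane between the partial folds. Both identities can be checked standalone:

#include <assert.h>
#include <stdint.h>

/* mask1 = (target_ulong)-1 / 0xf: one bit set per nibble. */
static_assert((uint64_t)-1 / 0xf == 0x1111111111111111ull, "mask1");

/* maskf = (target_ulong)-1 / 0xffff * 0xf: low nibble per 16-bit lane. */
static_assert((uint64_t)-1 / 0xffff * 0xf == 0x000f000f000f000full, "maskf");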
*/ + if (hppa_is_pa20(env)) { + reserved = MAKE_64BIT_MASK(40, 24) | MAKE_64BIT_MASK(28, 4); + reserved |= PSW_G; /* PA1.x only */ + reserved |= PSW_E; /* not implemented */ + } else { + reserved = MAKE_64BIT_MASK(32, 32) | MAKE_64BIT_MASK(28, 2); + reserved |= PSW_O | PSW_W; /* PA2.0 only */ + reserved |= PSW_E | PSW_Y | PSW_Z; /* not implemented */ + } + psw &= ~reserved; env->psw = psw & ~(PSW_N | PSW_V | PSW_CB); env->psw_n = (psw / PSW_N) & 1; env->psw_v = -((psw / PSW_V) & 1); - env->psw_cb_msb = (psw >> 15) & 1; + env->psw_cb_msb = (psw >> 39) & 1; + cb |= ((psw >> 38) & 1) << 60; + cb |= ((psw >> 37) & 1) << 56; + cb |= ((psw >> 36) & 1) << 52; + cb |= ((psw >> 35) & 1) << 48; + cb |= ((psw >> 34) & 1) << 44; + cb |= ((psw >> 33) & 1) << 40; + cb |= ((psw >> 32) & 1) << 36; + cb |= ((psw >> 15) & 1) << 32; cb |= ((psw >> 14) & 1) << 28; cb |= ((psw >> 13) & 1) << 24; cb |= ((psw >> 12) & 1) << 20; @@ -67,29 +97,30 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw) cb |= ((psw >> 9) & 1) << 8; cb |= ((psw >> 8) & 1) << 4; env->psw_cb = cb; - - /* If PSW_P changes, it affects how we translate addresses. */ - if ((psw ^ old_psw) & PSW_P) { -#ifndef CONFIG_USER_ONLY - tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK); -#endif - } } void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags) { - HPPACPU *cpu = HPPA_CPU(cs); - CPUHPPAState *env = &cpu->env; - target_ureg psw = cpu_hppa_get_psw(env); - target_ureg psw_cb; + CPUHPPAState *env = cpu_env(cs); + target_ulong psw = cpu_hppa_get_psw(env); + target_ulong psw_cb; char psw_c[20]; - int i; + int i, w; + uint64_t m; + + if (hppa_is_pa20(env)) { + w = 16; + m = UINT64_MAX; + } else { + w = 8; + m = UINT32_MAX; + } qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx - " IIR " TREG_FMT_lx "\n", + " IIR %0*" PRIx64 "\n", hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f), hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b), - env->cr[CR_IIR]); + w, m & env->cr[CR_IIR]); psw_c[0] = (psw & PSW_W ? 'W' : '-'); psw_c[1] = (psw & PSW_E ? 'E' : '-'); @@ -110,13 +141,15 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags) psw_c[16] = (psw & PSW_D ? 'D' : '-'); psw_c[17] = (psw & PSW_I ? 'I' : '-'); psw_c[18] = '\0'; - psw_cb = ((env->psw_cb >> 4) & 0x01111111) | (env->psw_cb_msb << 28); + psw_cb = ((env->psw_cb >> 4) & 0x1111111111111111ull) + | (env->psw_cb_msb << 60); - qemu_fprintf(f, "PSW " TREG_FMT_lx " CB " TREG_FMT_lx " %s\n", - psw, psw_cb, psw_c); + qemu_fprintf(f, "PSW %0*" PRIx64 " CB %0*" PRIx64 " %s\n", + w, m & psw, w, m & psw_cb, psw_c); for (i = 0; i < 32; i++) { - qemu_fprintf(f, "GR%02d " TREG_FMT_lx "%c", i, env->gr[i], + qemu_fprintf(f, "GR%02d %0*" PRIx64 "%c", + i, w, m & env->gr[i], (i & 3) == 3 ? 
'\n' : ' '); } #ifndef CONFIG_USER_ONLY diff --git a/target/hppa/helper.h b/target/hppa/helper.h index 647f043c85..20698f68ed 100644 --- a/target/hppa/helper.h +++ b/target/hppa/helper.h @@ -1,24 +1,28 @@ -#if TARGET_REGISTER_BITS == 64 -# define dh_alias_tr i64 -# define dh_typecode_tr dh_typecode_i64 -#else -# define dh_alias_tr i32 -# define dh_typecode_tr dh_typecode_i32 -#endif -#define dh_ctype_tr target_ureg - DEF_HELPER_2(excp, noreturn, env, int) -DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tr) -DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tr) +DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tl) +DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tl) -DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tr) -DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tr) -DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tr) -DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tr) +DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tl) + +DEF_HELPER_FLAGS_3(stdby_b, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(stdby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(stdby_e, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(stdby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tl) DEF_HELPER_FLAGS_1(ldc_check, TCG_CALL_NO_RWG, void, tl) -DEF_HELPER_FLAGS_4(probe, TCG_CALL_NO_WG, tr, env, tl, i32, i32) +DEF_HELPER_FLAGS_2(hadd_ss, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(hadd_us, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(havg, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_3(hshladd, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) +DEF_HELPER_FLAGS_3(hshradd, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) +DEF_HELPER_FLAGS_2(hsub_ss, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(hsub_us, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_4(probe, TCG_CALL_NO_WG, tl, env, tl, i32, i32) DEF_HELPER_FLAGS_1(loaded_fr0, TCG_CALL_NO_RWG, void, env) @@ -77,7 +81,7 @@ DEF_HELPER_FLAGS_4(fmpynfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32) DEF_HELPER_FLAGS_4(fmpyfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) DEF_HELPER_FLAGS_4(fmpynfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) -DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tr) +DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tl) #ifndef CONFIG_USER_ONLY DEF_HELPER_1(halt, noreturn, env) @@ -85,15 +89,18 @@ DEF_HELPER_1(reset, noreturn, env) DEF_HELPER_1(getshadowregs, void, env) DEF_HELPER_1(rfi, void, env) DEF_HELPER_1(rfi_r, void, env) -DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tr) -DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tr) -DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tr) -DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tr, env, tr) -DEF_HELPER_FLAGS_3(itlba, TCG_CALL_NO_RWG, void, env, tl, tr) -DEF_HELPER_FLAGS_3(itlbp, TCG_CALL_NO_RWG, void, env, tl, tr) +DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tl, env, tl) +DEF_HELPER_FLAGS_3(itlba_pa11, TCG_CALL_NO_RWG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(itlbp_pa11, 
TCG_CALL_NO_RWG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(idtlbt_pa20, TCG_CALL_NO_RWG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(iitlbt_pa20, TCG_CALL_NO_RWG, void, env, tl, tl) DEF_HELPER_FLAGS_2(ptlb, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(ptlb_l, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env) -DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tr, env, tl) +DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tl, env, tl) DEF_HELPER_FLAGS_1(change_prot_id, TCG_CALL_NO_RWG, void, env) DEF_HELPER_1(diag_btlb, void, env) #endif diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode index aebe03ccfd..f5a3f02fd1 100644 --- a/target/hppa/insns.decode +++ b/target/hppa/insns.decode @@ -46,11 +46,16 @@ %im5_0 0:s1 1:4 %im5_16 16:s1 17:4 +%len5 0:5 !function=assemble_6 +%len6_8 8:1 0:5 !function=assemble_6 +%len6_12 12:1 0:5 !function=assemble_6 +%cpos6_11 11:1 5:5 %ma_to_m 5:1 13:1 !function=ma_to_m %ma2_to_m 2:2 !function=ma_to_m %pos_to_m 0:1 !function=pos_to_m %neg_to_m 0:1 !function=neg_to_m %a_to_m 2:1 !function=neg_to_m +%cmpbid_c 13:2 !function=cmpbid_c #### # Argument set definitions @@ -59,28 +64,43 @@ # All insns that need to form a virtual address should use this set. &ldst t b x disp sp m scale size -&rr_cf t r cf +&rr_cf_d t r cf d +&rrr t r1 r2 &rrr_cf t r1 r2 cf -&rrr_cf_sh t r1 r2 cf sh +&rrr_cf_d t r1 r2 cf d +&rrr_sh t r1 r2 sh +&rrr_cf_d_sh t r1 r2 cf d sh +&rri t r i &rri_cf t r i cf +&rri_cf_d t r i cf d &rrb_c_f disp n c f r1 r2 +&rrb_c_d_f disp n c d f r1 r2 &rib_c_f disp n c f r i +&rib_c_d_f disp n c d f r i #### # Format definitions #### -@rr_cf ...... r:5 ..... cf:4 ....... t:5 &rr_cf +@rr_cf_d ...... r:5 ..... cf:4 ...... d:1 t:5 &rr_cf_d +@rrr ...... r2:5 r1:5 .... ....... t:5 &rrr @rrr_cf ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf -@rrr_cf_sh ...... r2:5 r1:5 cf:4 .... sh:2 . t:5 &rrr_cf_sh -@rrr_cf_sh0 ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf_sh sh=0 +@rrr_cf_d ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d +@rrr_sh ...... r2:5 r1:5 ........ sh:2 . t:5 &rrr_sh +@rrr_cf_d_sh ...... r2:5 r1:5 cf:4 .... sh:2 d:1 t:5 &rrr_cf_d_sh +@rrr_cf_d_sh0 ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d_sh sh=0 @rri_cf ...... r:5 t:5 cf:4 . ........... &rri_cf i=%lowsign_11 +@rri_cf_d ...... r:5 t:5 cf:4 d:1 ........... &rri_cf_d i=%lowsign_11 @rrb_cf ...... r2:5 r1:5 c:3 ........... n:1 . \ &rrb_c_f disp=%assemble_12 +@rrb_cdf ...... r2:5 r1:5 c:3 ........... n:1 . \ + &rrb_c_d_f disp=%assemble_12 @rib_cf ...... r:5 ..... c:3 ........... n:1 . \ &rib_c_f disp=%assemble_12 i=%im5_16 +@rib_cdf ...... r:5 ..... c:3 ........... n:1 . \ + &rib_c_d_f disp=%assemble_12 i=%im5_16 #### # System @@ -130,6 +150,7 @@ nop_addrx 000001 ..... ..... -- 01001110 . 00000 @addrx # pdc probe 000001 b:5 ri:5 sp:2 imm:1 100011 write:1 0 t:5 +# pa1.x tlb insert instructions ixtlbx 000001 b:5 r:5 sp:2 0100000 addr:1 0 00000 data=1 ixtlbx 000001 b:5 r:5 ... 000000 addr:1 0 00000 \ sp=%assemble_sr3x data=0 @@ -137,9 +158,26 @@ ixtlbx 000001 b:5 r:5 ... 000000 addr:1 0 00000 \ # pcxl and pcxl2 Fast TLB Insert instructions ixtlbxf 000001 00000 r:5 00 0 data:1 01000 addr:1 0 00000 -pxtlbx 000001 b:5 x:5 sp:2 0100100 local:1 m:1 ----- data=1 -pxtlbx 000001 b:5 x:5 ... 000100 local:1 m:1 ----- \ - sp=%assemble_sr3x data=0 +# pa2.0 tlb insert idtlbt and iitlbt instructions +ixtlbt 000001 r2:5 r1:5 000 data:1 100000 0 00000 # idtlbt + +# pdtlb, pitlb +pxtlb 000001 b:5 x:5 sp:2 01001000 m:1 ----- \ + &ldst disp=0 scale=0 size=0 t=0 +pxtlb 000001 b:5 x:5 ... 
0001000 m:1 ----- \ + &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x + +# ... pa20 local +pxtlb_l 000001 b:5 x:5 sp:2 01011000 m:1 ----- \ + &ldst disp=0 scale=0 size=0 t=0 +pxtlb_l 000001 b:5 x:5 ... 0011000 m:1 ----- \ + &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x + +# pdtlbe, pitlbe +pxtlbe 000001 b:5 x:5 sp:2 01001001 m:1 ----- \ + &ldst disp=0 scale=0 size=0 t=0 +pxtlbe 000001 b:5 x:5 ... 0001001 m:1 ----- \ + &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x lpa 000001 b:5 x:5 sp:2 01001101 m:1 t:5 \ &ldst disp=0 scale=0 size=0 @@ -150,30 +188,36 @@ lci 000001 ----- ----- -- 01001100 0 t:5 # Arith/Log #### -andcm 000010 ..... ..... .... 000000 - ..... @rrr_cf -and 000010 ..... ..... .... 001000 - ..... @rrr_cf -or 000010 ..... ..... .... 001001 - ..... @rrr_cf -xor 000010 ..... ..... .... 001010 0 ..... @rrr_cf -uxor 000010 ..... ..... .... 001110 0 ..... @rrr_cf +andcm 000010 ..... ..... .... 000000 . ..... @rrr_cf_d +and 000010 ..... ..... .... 001000 . ..... @rrr_cf_d +or 000010 ..... ..... .... 001001 . ..... @rrr_cf_d +xor 000010 ..... ..... .... 001010 . ..... @rrr_cf_d +uxor 000010 ..... ..... .... 001110 . ..... @rrr_cf_d ds 000010 ..... ..... .... 010001 0 ..... @rrr_cf -cmpclr 000010 ..... ..... .... 100010 0 ..... @rrr_cf -uaddcm 000010 ..... ..... .... 100110 0 ..... @rrr_cf -uaddcm_tc 000010 ..... ..... .... 100111 0 ..... @rrr_cf -dcor 000010 ..... 00000 .... 101110 0 ..... @rr_cf -dcor_i 000010 ..... 00000 .... 101111 0 ..... @rr_cf - -add 000010 ..... ..... .... 0110.. - ..... @rrr_cf_sh -add_l 000010 ..... ..... .... 1010.. 0 ..... @rrr_cf_sh -add_tsv 000010 ..... ..... .... 1110.. 0 ..... @rrr_cf_sh -add_c 000010 ..... ..... .... 011100 0 ..... @rrr_cf_sh0 -add_c_tsv 000010 ..... ..... .... 111100 0 ..... @rrr_cf_sh0 - -sub 000010 ..... ..... .... 010000 - ..... @rrr_cf -sub_tsv 000010 ..... ..... .... 110000 0 ..... @rrr_cf -sub_tc 000010 ..... ..... .... 010011 0 ..... @rrr_cf -sub_tsv_tc 000010 ..... ..... .... 110011 0 ..... @rrr_cf -sub_b 000010 ..... ..... .... 010100 0 ..... @rrr_cf -sub_b_tsv 000010 ..... ..... .... 110100 0 ..... @rrr_cf +cmpclr 000010 ..... ..... .... 100010 . ..... @rrr_cf_d +uaddcm 000010 ..... ..... .... 100110 . ..... @rrr_cf_d +uaddcm_tc 000010 ..... ..... .... 100111 . ..... @rrr_cf_d +dcor 000010 ..... 00000 .... 101110 . ..... @rr_cf_d +dcor_i 000010 ..... 00000 .... 101111 . ..... @rr_cf_d + +add 000010 ..... ..... .... 0110.. . ..... @rrr_cf_d_sh +add_l 000010 ..... ..... .... 1010.. . ..... @rrr_cf_d_sh +add_tsv 000010 ..... ..... .... 1110.. . ..... @rrr_cf_d_sh +{ + add_c 000010 ..... ..... .... 011100 . ..... @rrr_cf_d_sh0 + hshladd 000010 ..... ..... 0000 0111.. 0 ..... @rrr_sh +} +add_c_tsv 000010 ..... ..... .... 111100 . ..... @rrr_cf_d_sh0 + +sub 000010 ..... ..... .... 010000 . ..... @rrr_cf_d +sub_tsv 000010 ..... ..... .... 110000 . ..... @rrr_cf_d +sub_tc 000010 ..... ..... .... 010011 . ..... @rrr_cf_d +sub_tsv_tc 000010 ..... ..... .... 110011 . ..... @rrr_cf_d +{ + sub_b 000010 ..... ..... .... 010100 . ..... @rrr_cf_d + hshradd 000010 ..... ..... 0000 0101.. 0 ..... @rrr_sh +} +sub_b_tsv 000010 ..... ..... .... 110100 . ..... @rrr_cf_d ldil 001000 t:5 ..................... i=%assemble_21 addil 001010 r:5 ..................... i=%assemble_21 @@ -187,7 +231,28 @@ addi_tc_tsv 101100 ..... ..... .... 1 ........... @rri_cf subi 100101 ..... ..... .... 0 ........... @rri_cf subi_tsv 100101 ..... ..... .... 1 ........... @rri_cf -cmpiclr 100100 ..... ..... .... 0 ........... @rri_cf +cmpiclr 100100 ..... ..... 
.... . ........... @rri_cf_d + +hadd 000010 ..... ..... 00000011 11 0 ..... @rrr +hadd_ss 000010 ..... ..... 00000011 01 0 ..... @rrr +hadd_us 000010 ..... ..... 00000011 00 0 ..... @rrr + +havg 000010 ..... ..... 00000010 11 0 ..... @rrr + +hshl 111110 00000 r:5 100010 i:4 0 t:5 &rri +hshr_s 111110 r:5 00000 110011 i:4 0 t:5 &rri +hshr_u 111110 r:5 00000 110010 i:4 0 t:5 &rri + +hsub 000010 ..... ..... 00000001 11 0 ..... @rrr +hsub_ss 000010 ..... ..... 00000001 01 0 ..... @rrr +hsub_us 000010 ..... ..... 00000001 00 0 ..... @rrr + +mixh_l 111110 ..... ..... 1 00 00100000 ..... @rrr +mixh_r 111110 ..... ..... 1 10 00100000 ..... @rrr +mixw_l 111110 ..... ..... 1 00 00000000 ..... @rrr +mixw_r 111110 ..... ..... 1 10 00000000 ..... @rrr + +permh 111110 r1:5 r2:5 0 c0:2 0 c1:2 c2:2 c3:2 0 t:5 #### # Index Mem @@ -204,10 +269,16 @@ ld 000011 ..... ..... .. . 0 -- 00 size:2 ...... @ldstx st 000011 ..... ..... .. . 1 -- 10 size:2 ...... @stim5 ldc 000011 ..... ..... .. . 1 -- 0111 ...... @ldim5 size=2 ldc 000011 ..... ..... .. . 0 -- 0111 ...... @ldstx size=2 +ldc 000011 ..... ..... .. . 1 -- 0101 ...... @ldim5 size=3 +ldc 000011 ..... ..... .. . 0 -- 0101 ...... @ldstx size=3 lda 000011 ..... ..... .. . 1 -- 0110 ...... @ldim5 size=2 lda 000011 ..... ..... .. . 0 -- 0110 ...... @ldstx size=2 +lda 000011 ..... ..... .. . 1 -- 0100 ...... @ldim5 size=3 +lda 000011 ..... ..... .. . 0 -- 0100 ...... @ldstx size=3 sta 000011 ..... ..... .. . 1 -- 1110 ...... @stim5 size=2 +sta 000011 ..... ..... .. . 1 -- 1111 ...... @stim5 size=3 stby 000011 b:5 r:5 sp:2 a:1 1 -- 1100 m:1 ..... disp=%im5_0 +stdby 000011 b:5 r:5 sp:2 a:1 1 -- 1101 m:1 ..... disp=%im5_0 @fldstwx ...... b:5 x:5 sp:2 scale:1 ....... m:1 ..... \ &ldst t=%rt64 disp=0 size=2 @@ -233,6 +304,8 @@ fstd 001011 ..... ..... .. . 1 -- 100 0 . ..... @fldstdi # Offset Mem #### +@ldstim11 ...... b:5 t:5 sp:2 .............. \ + &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3 @ldstim14 ...... b:5 t:5 sp:2 .............. \ &ldst disp=%lowsign_14 x=0 scale=0 m=0 @ldstim14m ...... b:5 t:5 sp:2 .............. \ @@ -264,11 +337,11 @@ fstw 011110 b:5 ..... sp:2 .............. \ fstw 011111 b:5 ..... sp:2 ...........0.. \ &ldst disp=%assemble_12a t=%rm64 m=0 x=0 scale=0 size=2 -fldd 010100 b:5 t:5 sp:2 .......... .. 1 . \ - &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3 +ld 010100 ..... ..... .. ............0. @ldstim11 +fldd 010100 ..... ..... .. ............1. @ldstim11 -fstd 011100 b:5 t:5 sp:2 .......... .. 1 . \ - &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3 +st 011100 ..... ..... .. ............0. @ldstim11 +fstd 011100 ..... ..... .. ............1. @ldstim11 #### # Floating-point Multiply Add @@ -286,16 +359,20 @@ fmpysub_d 100110 ..... ..... ..... ..... 1 ..... @mpyadd # Conditional Branches #### -bb_sar 110000 00000 r:5 c:1 10 ........... n:1 . disp=%assemble_12 -bb_imm 110001 p:5 r:5 c:1 10 ........... n:1 . disp=%assemble_12 +bb_sar 110000 00000 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12 +bb_imm 110001 p:5 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12 movb 110010 ..... ..... ... ........... . . @rrb_cf f=0 movbi 110011 ..... ..... ... ........... . . @rib_cf f=0 -cmpb 100000 ..... ..... ... ........... . . @rrb_cf f=0 -cmpb 100010 ..... ..... ... ........... . . @rrb_cf f=1 -cmpbi 100001 ..... ..... ... ........... . . @rib_cf f=0 -cmpbi 100011 ..... ..... ... ........... . . @rib_cf f=1 +cmpb 100000 ..... ..... ... ........... . . @rrb_cdf d=0 f=0 +cmpb 100010 ..... ..... ... ........... . . 
@rrb_cdf d=0 f=1 +cmpb 100111 ..... ..... ... ........... . . @rrb_cdf d=1 f=0 +cmpb 101111 ..... ..... ... ........... . . @rrb_cdf d=1 f=1 +cmpbi 100001 ..... ..... ... ........... . . @rib_cdf d=0 f=0 +cmpbi 100011 ..... ..... ... ........... . . @rib_cdf d=0 f=1 +cmpbi 111011 r:5 ..... f:1 .. ........... n:1 . \ + &rib_c_d_f d=1 disp=%assemble_12 c=%cmpbid_c i=%im5_16 addb 101000 ..... ..... ... ........... . . @rrb_cf f=0 addb 101010 ..... ..... ... ........... . . @rrb_cf f=1 @@ -306,16 +383,28 @@ addbi 101011 ..... ..... ... ........... . . @rib_cf f=1 # Shift, Extract, Deposit #### -shrpw_sar 110100 r2:5 r1:5 c:3 00 0 00000 t:5 -shrpw_imm 110100 r2:5 r1:5 c:3 01 0 cpos:5 t:5 - -extrw_sar 110100 r:5 t:5 c:3 10 se:1 00000 clen:5 -extrw_imm 110100 r:5 t:5 c:3 11 se:1 pos:5 clen:5 - -depw_sar 110101 t:5 r:5 c:3 00 nz:1 00000 clen:5 -depw_imm 110101 t:5 r:5 c:3 01 nz:1 cpos:5 clen:5 -depwi_sar 110101 t:5 ..... c:3 10 nz:1 00000 clen:5 i=%im5_16 -depwi_imm 110101 t:5 ..... c:3 11 nz:1 cpos:5 clen:5 i=%im5_16 +shrp_sar 110100 r2:5 r1:5 c:3 00 0 d:1 0000 t:5 +shrp_imm 110100 r2:5 r1:5 c:3 01 0 cpos:5 t:5 d=0 +shrp_imm 110100 r2:5 r1:5 c:3 0. 1 ..... t:5 \ + d=1 cpos=%cpos6_11 + +extr_sar 110100 r:5 t:5 c:3 10 se:1 00 000 ..... d=0 len=%len5 +extr_sar 110100 r:5 t:5 c:3 10 se:1 1. 000 ..... d=1 len=%len6_8 +extr_imm 110100 r:5 t:5 c:3 11 se:1 pos:5 ..... d=0 len=%len5 +extr_imm 110110 r:5 t:5 c:3 .. se:1 ..... ..... \ + d=1 len=%len6_12 pos=%cpos6_11 + +dep_sar 110101 t:5 r:5 c:3 00 nz:1 00 000 ..... d=0 len=%len5 +dep_sar 110101 t:5 r:5 c:3 00 nz:1 1. 000 ..... d=1 len=%len6_8 +dep_imm 110101 t:5 r:5 c:3 01 nz:1 cpos:5 ..... d=0 len=%len5 +dep_imm 111100 t:5 r:5 c:3 .. nz:1 ..... ..... \ + d=1 len=%len6_12 cpos=%cpos6_11 +depi_sar 110101 t:5 ..... c:3 10 nz:1 d:1 . 000 ..... \ + i=%im5_16 len=%len6_8 +depi_imm 110101 t:5 ..... c:3 11 nz:1 cpos:5 ..... \ + d=0 i=%im5_16 len=%len5 +depi_imm 111101 t:5 ..... c:3 .. nz:1 ..... ..... \ + d=1 i=%im5_16 len=%len6_12 cpos=%cpos6_11 #### # Branch External @@ -343,6 +432,8 @@ bl 111010 ..... ..... 101 ........... n:1 . &BL l=2 \ disp=%assemble_22 b_gate 111010 ..... ..... 001 ........... . . @bl blr 111010 l:5 x:5 010 00000000000 n:1 0 +nopbts 111010 00000 00000 010 0---------1 0 1 # clrbts/popbts +nopbts 111010 00000 ----- 010 00000000000 0 1 # pushbts/pushnom bv 111010 b:5 x:5 110 00000000000 n:1 0 bve 111010 b:5 00000 110 10000000000 n:1 - l=0 bve 111010 b:5 00000 111 10000000000 n:1 - l=2 @@ -384,7 +475,7 @@ fmpyfadd_d 101110 rm1:5 rm2:5 ... 0 1 ..0 0 0 neg:1 t:5 ra3=%rc32 @f0e_f_3 ...... ..... ..... ... .0 110 ..0 ..... \ &fclass3 r1=%ra64 r2=%rb64 t=%rt64 -@f0e_d_3 ...... r1:5 r2:5 ... 01 110 000 t:5 +@f0e_d_3 ...... r1:5 r2:5 ... 01 110 000 t:5 &fclass3 # Floating point class 0 diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c index 3ab9934a1d..467ee7daf5 100644 --- a/target/hppa/int_helper.c +++ b/target/hppa/int_helper.c @@ -52,9 +52,17 @@ static void io_eir_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { HPPACPU *cpu = opaque; - int le_bit = ~data & (TARGET_REGISTER_BITS - 1); + CPUHPPAState *env = &cpu->env; + int widthm1 = 31; + int le_bit; + + /* The default PSW.W controls the width of EIRR. 
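*/

/*
 * A minimal sketch of the conversion above (the helper name is
 * hypothetical, not part of the patch): PA-RISC numbers bit 0 as the
 * most significant bit, and ~data & widthm1 == widthm1 - (data & widthm1),
 * so the architected bit number becomes a left-shift count:
 *
 *     uint64_t eirr_bit(uint64_t data, int widthm1)
 *     {
 *         return 1ull << (~data & widthm1);
 *     }
 *
 * E.g. a guest write of 0 to a 32-bit EIRR sets 1ull << 31.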
*/
+    if (hppa_is_pa20(env) && env->cr[CR_PSW_DEFAULT] & PDC_PSW_WIDE_BIT) {
+        widthm1 = 63;
+    }
+    le_bit = ~data & widthm1;

-    cpu->env.cr[CR_EIRR] |= (target_ureg)1 << le_bit;
+    env->cr[CR_EIRR] |= 1ull << le_bit;
     eval_interrupt(cpu);
 }

@@ -73,7 +81,7 @@ void hppa_cpu_alarm_timer(void *opaque)
     io_eir_write(opaque, 0, 0, 4);
 }

-void HELPER(write_eirr)(CPUHPPAState *env, target_ureg val)
+void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val)
 {
     env->cr[CR_EIRR] &= ~val;
     qemu_mutex_lock_iothread();
@@ -81,7 +89,7 @@ void HELPER(write_eirr)(CPUHPPAState *env, target_ureg val)
     qemu_mutex_unlock_iothread();
 }

-void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
+void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val)
 {
     env->cr[CR_EIEM] = val;
     qemu_mutex_lock_iothread();
@@ -94,25 +102,37 @@ void hppa_cpu_do_interrupt(CPUState *cs)
     HPPACPU *cpu = HPPA_CPU(cs);
     CPUHPPAState *env = &cpu->env;
     int i = cs->exception_index;
-    target_ureg iaoq_f = env->iaoq_f;
-    target_ureg iaoq_b = env->iaoq_b;
-    uint64_t iasq_f = env->iasq_f;
-    uint64_t iasq_b = env->iasq_b;
-
-    target_ureg old_psw;
+    uint64_t old_psw;

     /* As documented in pa2.0 -- interruption handling. */
     /* step 1 */
     env->cr[CR_IPSW] = old_psw = cpu_hppa_get_psw(env);

-    /* step 2 -- note PSW_W == 0 for !HPPA64. */
-    cpu_hppa_put_psw(env, PSW_W | (i == EXCP_HPMC ? PSW_M : 0));
+    /* step 2 -- Note PSW_W is masked out again for pa1.x */
+    cpu_hppa_put_psw(env,
+                     (env->cr[CR_PSW_DEFAULT] & PDC_PSW_WIDE_BIT ? PSW_W : 0) |
+                     (i == EXCP_HPMC ? PSW_M : 0));

     /* step 3 */
-    env->cr[CR_IIASQ] = iasq_f >> 32;
-    env->cr_back[0] = iasq_b >> 32;
-    env->cr[CR_IIAOQ] = iaoq_f;
-    env->cr_back[1] = iaoq_b;
+    /*
+     * For pa1.x, IIASQ is simply a copy of IASQ.
+     * For pa2.0, IIASQ is the top bits of the virtual address,
+     * or zero if translation is disabled.
+     */
+    if (!hppa_is_pa20(env)) {
+        env->cr[CR_IIASQ] = env->iasq_f >> 32;
+        env->cr_back[0] = env->iasq_b >> 32;
+    } else if (old_psw & PSW_C) {
+        env->cr[CR_IIASQ] =
+            hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
+        env->cr_back[0] =
+            hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32;
+    } else {
+        env->cr[CR_IIASQ] = 0;
+        env->cr_back[0] = 0;
+    }
+    env->cr[CR_IIAOQ] = env->iaoq_f;
+    env->cr_back[1] = env->iaoq_b;

     if (old_psw & PSW_Q) {
         /* step 5 */
@@ -145,14 +165,13 @@ void hppa_cpu_do_interrupt(CPUState *cs)
        /* ??? An alternate fool-proof method would be to store the
           instruction data into the unwind info.  That's probably
           a bit too much in the way of extra storage required.
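*/

/*
 * A worked example of the pa2.0 IIASQ formation above, assuming
 * PSW.W=0 and translation enabled: iasq_f = 0x0000123400000000
 * combined with iaoq_f = 0x00001000 forms the gva 0x0000123400001000,
 * so CR_IIASQ receives the space identifier 0x1234 rather than a raw
 * copy of IASQ.  Below, the faulting instruction is recovered for IIR
 * by translating iaoq_f by hand.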
*/ - vaddr vaddr; - hwaddr paddr; + vaddr vaddr = env->iaoq_f & -4; + hwaddr paddr = vaddr; - paddr = vaddr = iaoq_f & -4; if (old_psw & PSW_C) { int prot, t; - vaddr = hppa_form_gva_psw(old_psw, iasq_f, vaddr); + vaddr = hppa_form_gva_psw(old_psw, env->iasq_f, vaddr); t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX, 0, &paddr, &prot, NULL); if (t >= 0) { @@ -182,14 +201,14 @@ void hppa_cpu_do_interrupt(CPUState *cs) /* step 7 */ if (i == EXCP_TOC) { - env->iaoq_f = FIRMWARE_START; + env->iaoq_f = hppa_form_gva(env, 0, FIRMWARE_START); /* help SeaBIOS and provide iaoq_b and iasq_back in shadow regs */ env->gr[24] = env->cr_back[0]; env->gr[25] = env->cr_back[1]; } else { - env->iaoq_f = env->cr[CR_IVA] + 32 * i; + env->iaoq_f = hppa_form_gva(env, 0, env->cr[CR_IVA] + 32 * i); } - env->iaoq_b = env->iaoq_f + 4; + env->iaoq_b = hppa_form_gva(env, 0, env->iaoq_f + 4); env->iasq_f = 0; env->iasq_b = 0; @@ -239,14 +258,10 @@ void hppa_cpu_do_interrupt(CPUState *cs) snprintf(unknown, sizeof(unknown), "unknown %d", i); name = unknown; } - qemu_log("INT %6d: %s @ " TARGET_FMT_lx "," TARGET_FMT_lx - " -> " TREG_FMT_lx " " TARGET_FMT_lx "\n", - ++count, name, - hppa_form_gva(env, iasq_f, iaoq_f), - hppa_form_gva(env, iasq_b, iaoq_b), - env->iaoq_f, - hppa_form_gva(env, (uint64_t)env->cr[CR_ISR] << 32, - env->cr[CR_IOR])); + qemu_log("INT %6d: %s @ " TARGET_FMT_lx ":" TARGET_FMT_lx + " for " TARGET_FMT_lx ":" TARGET_FMT_lx "\n", + ++count, name, env->cr[CR_IIASQ], env->cr[CR_IIAOQ], + env->cr[CR_ISR], env->cr[CR_IOR]); } cs->exception_index = -1; } diff --git a/target/hppa/machine.c b/target/hppa/machine.c index 905991d7f9..2f8e8cc5a1 100644 --- a/target/hppa/machine.c +++ b/target/hppa/machine.c @@ -21,33 +21,12 @@ #include "cpu.h" #include "migration/cpu.h" -#if TARGET_REGISTER_BITS == 64 -#define qemu_put_betr qemu_put_be64 -#define qemu_get_betr qemu_get_be64 -#define VMSTATE_UINTTL_V(_f, _s, _v) \ - VMSTATE_UINT64_V(_f, _s, _v) -#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \ - VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v) -#else -#define qemu_put_betr qemu_put_be32 -#define qemu_get_betr qemu_get_be32 -#define VMSTATE_UINTTR_V(_f, _s, _v) \ - VMSTATE_UINT32_V(_f, _s, _v) -#define VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, _v) \ - VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v) -#endif - -#define VMSTATE_UINTTR(_f, _s) \ - VMSTATE_UINTTR_V(_f, _s, 0) -#define VMSTATE_UINTTR_ARRAY(_f, _s, _n) \ - VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, 0) - static int get_psw(QEMUFile *f, void *opaque, size_t size, const VMStateField *field) { CPUHPPAState *env = opaque; - cpu_hppa_put_psw(env, qemu_get_betr(f)); + cpu_hppa_put_psw(env, qemu_get_be64(f)); return 0; } @@ -55,7 +34,7 @@ static int put_psw(QEMUFile *f, void *opaque, size_t size, const VMStateField *field, JSONWriter *vmdesc) { CPUHPPAState *env = opaque; - qemu_put_betr(f, cpu_hppa_get_psw(env)); + qemu_put_be64(f, cpu_hppa_get_psw(env)); return 0; } @@ -65,70 +44,138 @@ static const VMStateInfo vmstate_psw = { .put = put_psw, }; -/* FIXME: Use the PA2.0 format, which is a superset of the PA1.1 format. 
*/
static int get_tlb(QEMUFile *f, void *opaque, size_t size,
                   const VMStateField *field)
{
-    hppa_tlb_entry *ent = opaque;
-    uint32_t val;
-
-    memset(ent, 0, sizeof(*ent));
-
-    ent->va_b = qemu_get_be64(f);
-    ent->pa = qemu_get_betr(f);
-    val = qemu_get_be32(f);
-
-    ent->entry_valid = extract32(val, 0, 1);
-    ent->access_id = extract32(val, 1, 18);
-    ent->u = extract32(val, 19, 1);
-    ent->ar_pl2 = extract32(val, 20, 2);
-    ent->ar_pl1 = extract32(val, 22, 2);
-    ent->ar_type = extract32(val, 24, 3);
-    ent->b = extract32(val, 27, 1);
-    ent->d = extract32(val, 28, 1);
-    ent->t = extract32(val, 29, 1);
-
-    ent->va_e = ent->va_b + TARGET_PAGE_SIZE - 1;
+    HPPATLBEntry *ent = opaque;
+    uint64_t val;
+
+    ent->itree.start = qemu_get_be64(f);
+    ent->itree.last = qemu_get_be64(f);
+    ent->pa = qemu_get_be64(f);
+    val = qemu_get_be64(f);
+
+    if (val) {
+        ent->t = extract64(val, 61, 1);
+        ent->d = extract64(val, 60, 1);
+        ent->b = extract64(val, 59, 1);
+        ent->ar_type = extract64(val, 56, 3);
+        ent->ar_pl1 = extract64(val, 54, 2);
+        ent->ar_pl2 = extract64(val, 52, 2);
+        ent->u = extract64(val, 51, 1);
+        /* o = bit 50 */
+        /* p = bit 49 */
+        ent->access_id = extract64(val, 1, 31);
+        ent->entry_valid = 1;
+    }
    return 0;
}

static int put_tlb(QEMUFile *f, void *opaque, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
-    hppa_tlb_entry *ent = opaque;
-    uint32_t val = 0;
+    HPPATLBEntry *ent = opaque;
+    uint64_t val = 0;

    if (ent->entry_valid) {
        val = 1;
-        val = deposit32(val, 1, 18, ent->access_id);
-        val = deposit32(val, 19, 1, ent->u);
-        val = deposit32(val, 20, 2, ent->ar_pl2);
-        val = deposit32(val, 22, 2, ent->ar_pl1);
-        val = deposit32(val, 24, 3, ent->ar_type);
-        val = deposit32(val, 27, 1, ent->b);
-        val = deposit32(val, 28, 1, ent->d);
-        val = deposit32(val, 29, 1, ent->t);
+        val = deposit64(val, 61, 1, ent->t);
+        val = deposit64(val, 60, 1, ent->d);
+        val = deposit64(val, 59, 1, ent->b);
+        val = deposit64(val, 56, 3, ent->ar_type);
+        val = deposit64(val, 54, 2, ent->ar_pl1);
+        val = deposit64(val, 52, 2, ent->ar_pl2);
+        val = deposit64(val, 51, 1, ent->u);
+        /* o = bit 50 */
+        /* p = bit 49 */
+        val = deposit64(val, 1, 31, ent->access_id);
    }

-    qemu_put_be64(f, ent->va_b);
-    qemu_put_betr(f, ent->pa);
-    qemu_put_be32(f, val);
+    qemu_put_be64(f, ent->itree.start);
+    qemu_put_be64(f, ent->itree.last);
+    qemu_put_be64(f, ent->pa);
+    qemu_put_be64(f, val);
    return 0;
}

-static const VMStateInfo vmstate_tlb = {
+static const VMStateInfo vmstate_tlb_entry = {
    .name = "tlb entry",
    .get = get_tlb,
    .put = put_tlb,
};

-static VMStateField vmstate_env_fields[] = {
-    VMSTATE_UINTTR_ARRAY(gr, CPUHPPAState, 32),
+static int tlb_pre_load(void *opaque)
+{
+    CPUHPPAState *env = opaque;
+
+    /*
+     * Zap the entire tlb, on-the-side data structures and all.
+     * Each tlb entry will have data re-filled by get_tlb.
+     */
+    memset(env->tlb, 0, sizeof(env->tlb));
+    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+    env->tlb_unused = NULL;
+    env->tlb_partial = NULL;
+
+    return 0;
+}
+
+static int tlb_post_load(void *opaque, int version_id)
+{
+    CPUHPPAState *env = opaque;
+    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
+    HPPATLBEntry **unused = &env->tlb_unused;
+    HPPATLBEntry *partial = NULL;
+
+    /*
+     * Re-create the interval tree from the valid entries.
+     * Truly invalid entries should have start == end == 0.
+     * Otherwise it should be the in-flight tlb_partial entry.
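+     * For example, an entry whose ITLBA had completed but whose ITLBP
+     * was still pending at save time has a non-zero range with
+     * entry_valid still clear; the scan below recovers it as tlb_partial.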
+ */ + for (uint32_t i = 0; i < ARRAY_SIZE(env->tlb); ++i) { + HPPATLBEntry *e = &env->tlb[i]; + + if (e->entry_valid) { + interval_tree_insert(&e->itree, &env->tlb_root); + } else if (i < btlb_entries) { + /* btlb not in unused list */ + } else if (partial == NULL && e->itree.start < e->itree.last) { + partial = e; + } else { + *unused = e; + unused = &e->unused_next; + } + } + env->tlb_partial = partial; + *unused = NULL; + + return 0; +} + +static const VMStateField vmstate_tlb_fields[] = { + VMSTATE_ARRAY(tlb, CPUHPPAState, + ARRAY_SIZE(((CPUHPPAState *)0)->tlb), + 0, vmstate_tlb_entry, HPPATLBEntry), + VMSTATE_UINT32(tlb_last, CPUHPPAState), + VMSTATE_END_OF_LIST() +}; + +static const VMStateDescription vmstate_tlb = { + .name = "env/tlb", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_tlb_fields, + .pre_load = tlb_pre_load, + .post_load = tlb_post_load, +}; + +static const VMStateField vmstate_env_fields[] = { + VMSTATE_UINT64_ARRAY(gr, CPUHPPAState, 32), VMSTATE_UINT64_ARRAY(fr, CPUHPPAState, 32), VMSTATE_UINT64_ARRAY(sr, CPUHPPAState, 8), - VMSTATE_UINTTR_ARRAY(cr, CPUHPPAState, 32), - VMSTATE_UINTTR_ARRAY(cr_back, CPUHPPAState, 2), - VMSTATE_UINTTR_ARRAY(shadow, CPUHPPAState, 7), + VMSTATE_UINT64_ARRAY(cr, CPUHPPAState, 32), + VMSTATE_UINT64_ARRAY(cr_back, CPUHPPAState, 2), + VMSTATE_UINT64_ARRAY(shadow, CPUHPPAState, 7), /* Save the architecture value of the psw, not the internally expanded version. Since this architecture value does not @@ -145,28 +192,29 @@ static VMStateField vmstate_env_fields[] = { .offset = 0 }, - VMSTATE_UINTTR(iaoq_f, CPUHPPAState), - VMSTATE_UINTTR(iaoq_b, CPUHPPAState), + VMSTATE_UINT64(iaoq_f, CPUHPPAState), + VMSTATE_UINT64(iaoq_b, CPUHPPAState), VMSTATE_UINT64(iasq_f, CPUHPPAState), VMSTATE_UINT64(iasq_b, CPUHPPAState), VMSTATE_UINT32(fr0_shadow, CPUHPPAState), - - VMSTATE_ARRAY(tlb, CPUHPPAState, ARRAY_SIZE(((CPUHPPAState *)0)->tlb), - 0, vmstate_tlb, hppa_tlb_entry), - VMSTATE_UINT32(tlb_last, CPUHPPAState), - VMSTATE_END_OF_LIST() }; +static const VMStateDescription *vmstate_env_subsections[] = { + &vmstate_tlb, + NULL +}; + static const VMStateDescription vmstate_env = { .name = "env", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 3, + .minimum_version_id = 3, .fields = vmstate_env_fields, + .subsections = vmstate_env_subsections, }; -static VMStateField vmstate_cpu_fields[] = { +static const VMStateField vmstate_cpu_fields[] = { VMSTATE_CPU(), VMSTATE_STRUCT(env, HPPACPU, 1, vmstate_env, CPUHPPAState), VMSTATE_END_OF_LIST() diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index 350485f619..858ce6ec7f 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -25,72 +25,136 @@ #include "hw/core/cpu.h" #include "trace.h" -static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr) +hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr) { - int i; - - for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) { - hppa_tlb_entry *ent = &env->tlb[i]; - if (ent->va_b <= addr && addr <= ent->va_e) { - trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid, - ent->va_b, ent->va_e, ent->pa); - return ent; - } + if (likely(extract64(addr, 58, 4) != 0xf)) { + /* Memory address space */ + return addr & MAKE_64BIT_MASK(0, 62); + } + if (extract64(addr, 54, 4) != 0) { + /* I/O address space */ + return addr | MAKE_64BIT_MASK(62, 2); + } + /* PDC address space */ + return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4); +} + +hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr) +{ + if (likely(extract32(addr, 28, 4) 
!= 0xf)) { + /* Memory address space */ + return addr & MAKE_64BIT_MASK(0, 32); + } + if (extract32(addr, 24, 4) != 0) { + /* I/O address space */ + return addr | MAKE_64BIT_MASK(32, 32); + } + /* PDC address space */ + return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4); +} + +static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr) +{ + if (!hppa_is_pa20(env)) { + return addr; + } else if (env->psw & PSW_W) { + return hppa_abs_to_phys_pa2_w1(addr); + } else { + return hppa_abs_to_phys_pa2_w0(addr); + } +} + +static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr) +{ + IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr); + + if (i) { + HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree); + trace_hppa_tlb_find_entry(env, ent, ent->entry_valid, + ent->itree.start, ent->itree.last, ent->pa); + return ent; } trace_hppa_tlb_find_entry_not_found(env, addr); return NULL; } -static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent, +static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent, bool force_flush_btlb) { CPUState *cs = env_cpu(env); + bool is_btlb; if (!ent->entry_valid) { return; } - trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa); + trace_hppa_tlb_flush_ent(env, ent, ent->itree.start, + ent->itree.last, ent->pa); - tlb_flush_range_by_mmuidx(cs, ent->va_b, - ent->va_e - ent->va_b + 1, - HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS); + tlb_flush_range_by_mmuidx(cs, ent->itree.start, + ent->itree.last - ent->itree.start + 1, + HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS); - /* never clear BTLBs, unless forced to do so. */ - if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) { + /* Never clear BTLBs, unless forced to do so. */ + is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)]; + if (is_btlb && !force_flush_btlb) { return; } + interval_tree_remove(&ent->itree, &env->tlb_root); memset(ent, 0, sizeof(*ent)); - ent->va_b = -1; + + if (!is_btlb) { + ent->unused_next = env->tlb_unused; + env->tlb_unused = ent; + } } -static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env) +static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e) { - hppa_tlb_entry *ent; - uint32_t i; + IntervalTreeNode *i, *n; - if (env->tlb_last < HPPA_BTLB_ENTRIES || env->tlb_last >= ARRAY_SIZE(env->tlb)) { - i = HPPA_BTLB_ENTRIES; - env->tlb_last = HPPA_BTLB_ENTRIES + 1; - } else { - i = env->tlb_last; - env->tlb_last++; + i = interval_tree_iter_first(&env->tlb_root, va_b, va_e); + for (; i ; i = n) { + HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree); + + /* + * Find the next entry now: In the normal case the current entry + * will be removed, but in the BTLB case it will remain. 
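* Fetching the successor before the flush keeps the walk safe:
 * interval_tree_remove() would otherwise invalidate the iterator
 * that the next step depends on.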
+ */ + n = interval_tree_iter_next(i, va_b, va_e); + hppa_flush_tlb_ent(env, ent, false); } +} + +static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env) +{ + HPPATLBEntry *ent = env->tlb_unused; + + if (ent == NULL) { + uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env); + uint32_t i = env->tlb_last; - ent = &env->tlb[i]; + if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) { + i = btlb_entries; + } + env->tlb_last = i + 1; - hppa_flush_tlb_ent(env, ent, false); + ent = &env->tlb[i]; + hppa_flush_tlb_ent(env, ent, false); + } + + env->tlb_unused = ent->unused_next; return ent; } int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, int type, hwaddr *pphys, int *pprot, - hppa_tlb_entry **tlb_entry) + HPPATLBEntry **tlb_entry) { hwaddr phys; int prot, r_prot, w_prot, x_prot, priv; - hppa_tlb_entry *ent; + HPPATLBEntry *ent; int ret = -1; if (tlb_entry) { @@ -106,7 +170,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, /* Find a valid tlb entry that matches the virtual address. */ ent = hppa_find_tlb(env, addr); - if (ent == NULL || !ent->entry_valid) { + if (ent == NULL) { phys = 0; prot = 0; ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS; @@ -118,7 +182,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, } /* We now know the physical address. */ - phys = ent->pa + (addr - ent->va_b); + phys = ent->pa + (addr - ent->itree.start); /* Map TLB access_rights field to QEMU protection. */ priv = MMU_IDX_TO_PRIV(mmu_idx); @@ -144,7 +208,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, } /* access_id == 0 means public page and no check is performed */ - if ((env->psw & PSW_P) && ent->access_id) { + if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) { /* If bits [31:1] match, and bit 0 is set, suppress write. */ int match = ent->access_id * 2 + 1; @@ -197,7 +261,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, } egress: - *pphys = phys; + *pphys = phys = hppa_abs_to_phys(env, phys); *pprot = prot; trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys); return ret; @@ -213,7 +277,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) /* ??? We really ought to know if the code mmu is disabled too, in order to get the correct debugging dumps. */ if (!(cpu->env.psw & PSW_D)) { - return addr; + return hppa_abs_to_phys(&cpu->env, addr); } excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0, @@ -225,13 +289,60 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) return excp == EXCP_DTLB_MISS ? -1 : phys; } +G_NORETURN static void +raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr, + vaddr addr, bool mmu_disabled) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = excp; + + if (env->psw & PSW_Q) { + /* + * For pa1.x, the offset and space never overlap, and so we + * simply extract the high and low part of the virtual address. + * + * For pa2.0, the formation of these are described in section + * "Interruption Parameter Registers", page 2-15. + */ + env->cr[CR_IOR] = (uint32_t)addr; + env->cr[CR_ISR] = addr >> 32; + + if (hppa_is_pa20(env)) { + if (mmu_disabled) { + /* + * If data translation was disabled, the ISR contains + * the upper portion of the abs address, zero-extended. 
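* As a concrete example, an absolute access to 0xfffffff0f0f0f0f0
 * with translation off leaves IOR = 0xf0f0f0f0 and, after the
 * masking below, ISR = 0x3ffffff0.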
+ */ + env->cr[CR_ISR] &= 0x3fffffff; + } else { + /* + * If data translation was enabled, the upper two bits + * of the IOR (the b field) are equal to the two space + * bits from the base register used to form the gva. + */ + uint64_t b; + + cpu_restore_state(cs, retaddr); + + b = env->gr[env->unwind_breg]; + b >>= (env->psw & PSW_W ? 62 : 30); + env->cr[CR_IOR] |= b << 62; + + cpu_loop_exit(cs); + } + } + } + cpu_loop_exit_restore(cs, retaddr); +} + bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, MMUAccessType type, int mmu_idx, bool probe, uintptr_t retaddr) { HPPACPU *cpu = HPPA_CPU(cs); CPUHPPAState *env = &cpu->env; - hppa_tlb_entry *ent; + HPPATLBEntry *ent; int prot, excp, a_prot; hwaddr phys; @@ -254,56 +365,51 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, return false; } trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx); + /* Failure. Raise the indicated exception. */ - cs->exception_index = excp; - if (cpu->env.psw & PSW_Q) { - /* ??? Needs tweaking for hppa64. */ - cpu->env.cr[CR_IOR] = addr; - cpu->env.cr[CR_ISR] = addr >> 32; - } - cpu_loop_exit_restore(cs, retaddr); + raise_exception_with_ior(env, excp, retaddr, + addr, mmu_idx == MMU_PHYS_IDX); } trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, size, type, mmu_idx); - /* Success! Store the translation into the QEMU TLB. */ + + /* + * Success! Store the translation into the QEMU TLB. + * Note that we always install a single-page entry, because that + * is what works best with softmmu -- anything else will trigger + * the large page protection mask. We do not require this, + * because we record the large page here in the hppa tlb. + */ tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, - prot, mmu_idx, TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0)); + prot, mmu_idx, TARGET_PAGE_SIZE); return true; } /* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */ -void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg) +void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg) { - hppa_tlb_entry *empty = NULL; - int i; - - /* Zap any old entries covering ADDR; notice empty entries on the way. */ - for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) { - hppa_tlb_entry *ent = &env->tlb[i]; - if (ent->va_b <= addr && addr <= ent->va_e) { - if (ent->entry_valid) { - hppa_flush_tlb_ent(env, ent, false); - } - if (!empty) { - empty = ent; - } - } - } + HPPATLBEntry *ent; - /* If we didn't see an empty entry, evict one. */ - if (empty == NULL) { - empty = hppa_alloc_tlb_ent(env); + /* Zap any old entries covering ADDR. */ + addr &= TARGET_PAGE_MASK; + hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1); + + ent = env->tlb_partial; + if (ent == NULL) { + ent = hppa_alloc_tlb_ent(env); + env->tlb_partial = ent; } - /* Note that empty->entry_valid == 0 already. */ - empty->va_b = addr & TARGET_PAGE_MASK; - empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1; - empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS; - trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa); + /* Note that ent->entry_valid == 0 already. 
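* The pa1.x insertion is a two-step protocol: ITLBA stages the
 * virtual range here in tlb_partial, and the matching ITLBP below
 * supplies the protection word and makes the entry visible.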
*/ + ent->itree.start = addr; + ent->itree.last = addr + TARGET_PAGE_SIZE - 1; + ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS; + trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa); } -static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent, target_ureg reg) +static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent, + target_ulong reg) { ent->access_id = extract32(reg, 1, 18); ent->u = extract32(reg, 19, 1); @@ -314,49 +420,153 @@ static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent, target_ureg ent->d = extract32(reg, 28, 1); ent->t = extract32(reg, 29, 1); ent->entry_valid = 1; + + interval_tree_insert(&ent->itree, &env->tlb_root); trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2, ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t); } /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */ -void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg) +void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg) { - hppa_tlb_entry *ent = hppa_find_tlb(env, addr); + HPPATLBEntry *ent = env->tlb_partial; - if (unlikely(ent == NULL)) { - qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n"); - return; + if (ent) { + env->tlb_partial = NULL; + if (ent->itree.start <= addr && addr <= ent->itree.last) { + set_access_bits_pa11(env, ent, reg); + return; + } } + qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n"); +} + +static void itlbt_pa20(CPUHPPAState *env, target_ulong r1, + target_ulong r2, vaddr va_b) +{ + HPPATLBEntry *ent; + vaddr va_e; + uint64_t va_size; + int mask_shift; + + mask_shift = 2 * (r1 & 0xf); + va_size = TARGET_PAGE_SIZE << mask_shift; + va_b &= -va_size; + va_e = va_b + va_size - 1; + + hppa_flush_tlb_range(env, va_b, va_e); + ent = hppa_alloc_tlb_ent(env); + + ent->itree.start = va_b; + ent->itree.last = va_e; + ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift); + ent->t = extract64(r2, 61, 1); + ent->d = extract64(r2, 60, 1); + ent->b = extract64(r2, 59, 1); + ent->ar_type = extract64(r2, 56, 3); + ent->ar_pl1 = extract64(r2, 54, 2); + ent->ar_pl2 = extract64(r2, 52, 2); + ent->u = extract64(r2, 51, 1); + /* o = bit 50 */ + /* p = bit 49 */ + ent->access_id = extract64(r2, 1, 31); + ent->entry_valid = 1; - set_access_bits(env, ent, reg); + interval_tree_insert(&ent->itree, &env->tlb_root); + trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa); + trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, + ent->ar_pl2, ent->ar_pl1, ent->ar_type, + ent->b, ent->d, ent->t); } -/* Purge (Insn/Data) TLB. This is explicitly page-based, and is - synchronous across all processors. */ +void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2) +{ + vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]); + itlbt_pa20(env, r1, r2, va_b); +} + +void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2) +{ + vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]); + itlbt_pa20(env, r1, r2, va_b); +} + +/* Purge (Insn/Data) TLB. */ static void ptlb_work(CPUState *cpu, run_on_cpu_data data) { CPUHPPAState *env = cpu_env(cpu); - target_ulong addr = (target_ulong) data.target_ptr; - hppa_tlb_entry *ent = hppa_find_tlb(env, addr); + vaddr start = data.target_ptr; + vaddr end; + + /* + * PA2.0 allows a range of pages encoded into GR[b], which we have + * copied into the bottom bits of the otherwise page-aligned address. 
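* For example, with 4 KiB pages an encoded low nibble of 7 selects
 * a TARGET_PAGE_SIZE << 14 range, i.e. 64 MiB.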
+ * PA1.x will always provide zero here, for a single page flush. + */ + end = start & 0xf; + start &= TARGET_PAGE_MASK; + end = TARGET_PAGE_SIZE << (2 * end); + end = start + end - 1; + + hppa_flush_tlb_range(env, start, end); +} - if (ent && ent->entry_valid) { - hppa_flush_tlb_ent(env, ent, false); - } +/* This is local to the current cpu. */ +void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr) +{ + trace_hppa_tlb_ptlb_local(env); + ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr)); } +/* This is synchronous across all processors. */ void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr) { CPUState *src = env_cpu(env); CPUState *cpu; + bool wait = false; + trace_hppa_tlb_ptlb(env); run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr); CPU_FOREACH(cpu) { if (cpu != src) { async_run_on_cpu(cpu, ptlb_work, data); + wait = true; } } - async_safe_run_on_cpu(src, ptlb_work, data); + if (wait) { + async_safe_run_on_cpu(src, ptlb_work, data); + } else { + ptlb_work(src, data); + } +} + +void hppa_ptlbe(CPUHPPAState *env) +{ + uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env); + uint32_t i; + + /* Zap the (non-btlb) tlb entries themselves. */ + memset(&env->tlb[btlb_entries], 0, + sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0])); + env->tlb_last = btlb_entries; + env->tlb_partial = NULL; + + /* Put them all onto the unused list. */ + env->tlb_unused = &env->tlb[btlb_entries]; + for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) { + env->tlb[i].unused_next = &env->tlb[i + 1]; + } + + /* Re-initialize the interval tree with only the btlb entries. */ + memset(&env->tlb_root, 0, sizeof(env->tlb_root)); + for (i = 0; i < btlb_entries; ++i) { + if (env->tlb[i].entry_valid) { + interval_tree_insert(&env->tlb[i].itree, &env->tlb_root); + } + } + + tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK); } /* Purge (Insn/Data) TLB entry. This affects an implementation-defined @@ -365,17 +575,12 @@ void HELPER(ptlbe)(CPUHPPAState *env) { trace_hppa_tlb_ptlbe(env); qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n"); - memset(&env->tlb[HPPA_BTLB_ENTRIES], 0, - sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0])); - env->tlb_last = HPPA_BTLB_ENTRIES; - tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK); + hppa_ptlbe(env); } void cpu_hppa_change_prot_id(CPUHPPAState *env) { - if (env->psw & PSW_P) { - tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK); - } + tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK); } void HELPER(change_prot_id)(CPUHPPAState *env) @@ -383,7 +588,7 @@ void HELPER(change_prot_id)(CPUHPPAState *env) cpu_hppa_change_prot_id(env); } -target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr) +target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr) { hwaddr phys; int prot, excp; @@ -391,16 +596,11 @@ target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr) excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, &phys, &prot, NULL); if (excp >= 0) { - if (env->psw & PSW_Q) { - /* ??? Needs tweaking for hppa64. */ - env->cr[CR_IOR] = addr; - env->cr[CR_ISR] = addr >> 32; - } if (excp == EXCP_DTLB_MISS) { excp = EXCP_NA_DTLB_MISS; } trace_hppa_tlb_lpa_failed(env, addr); - hppa_dynamic_excp(env, excp, GETPC()); + raise_exception_with_ior(env, excp, GETPC(), addr, false); } trace_hppa_tlb_lpa_success(env, addr, phys); return phys; @@ -409,7 +609,7 @@ target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr) /* Return the ar_type of the TLB at VADDR, or -1. 
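* The translator consults this when validating gateway (B,GATE)
 * privilege promotion for a target page.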
*/ int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr) { - hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr); + HPPATLBEntry *ent = hppa_find_tlb(env, vaddr); return ent ? ent->ar_type : -1; } @@ -424,15 +624,17 @@ void HELPER(diag_btlb)(CPUHPPAState *env) unsigned int phys_page, len, slot; int mmu_idx = cpu_mmu_index(env, 0); uintptr_t ra = GETPC(); - hppa_tlb_entry *btlb; + HPPATLBEntry *btlb; uint64_t virt_page; uint32_t *vaddr; + uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env); -#ifdef TARGET_HPPA64 /* BTLBs are not supported on 64-bit CPUs */ - env->gr[28] = -1; /* nonexistent procedure */ - return; -#endif + if (btlb_entries == 0) { + env->gr[28] = -1; /* nonexistent procedure */ + return; + } + env->gr[28] = 0; /* PDC_OK */ switch (env->gr[25]) { @@ -446,8 +648,8 @@ void HELPER(diag_btlb)(CPUHPPAState *env) } else { vaddr[0] = cpu_to_be32(1); vaddr[1] = cpu_to_be32(16 * 1024); - vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED); - vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE); + vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED); + vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE); } break; case 1: @@ -464,15 +666,17 @@ void HELPER(diag_btlb)(CPUHPPAState *env) (long long) virt_page << TARGET_PAGE_BITS, (long long) (virt_page + len) << TARGET_PAGE_BITS, (long long) virt_page, phys_page, len, slot); - if (slot < HPPA_BTLB_ENTRIES) { + if (slot < btlb_entries) { btlb = &env->tlb[slot]; - /* force flush of possibly existing BTLB entry */ + + /* Force flush of possibly existing BTLB entry. */ hppa_flush_tlb_ent(env, btlb, true); - /* create new BTLB entry */ - btlb->va_b = virt_page << TARGET_PAGE_BITS; - btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1; + + /* Create new BTLB entry */ + btlb->itree.start = virt_page << TARGET_PAGE_BITS; + btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1; btlb->pa = phys_page << TARGET_PAGE_BITS; - set_access_bits(env, btlb, env->gr[20]); + set_access_bits_pa11(env, btlb, env->gr[20]); btlb->t = 0; btlb->d = 1; } else { @@ -484,7 +688,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env) slot = env->gr[22]; qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n", slot); - if (slot < HPPA_BTLB_ENTRIES) { + if (slot < btlb_entries) { btlb = &env->tlb[slot]; hppa_flush_tlb_ent(env, btlb, true); } else { @@ -494,7 +698,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env) case 3: /* Purge all BTLB entries */ qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n"); - for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) { + for (slot = 0; slot < btlb_entries; slot++) { btlb = &env->tlb[slot]; hppa_flush_tlb_ent(env, btlb, true); } diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c index 837e2b3117..a0e31c0c25 100644 --- a/target/hppa/op_helper.c +++ b/target/hppa/op_helper.c @@ -42,25 +42,25 @@ G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra) cpu_loop_exit_restore(cs, ra); } -void HELPER(tsv)(CPUHPPAState *env, target_ureg cond) +void HELPER(tsv)(CPUHPPAState *env, target_ulong cond) { - if (unlikely((target_sreg)cond < 0)) { + if (unlikely((target_long)cond < 0)) { hppa_dynamic_excp(env, EXCP_OVERFLOW, GETPC()); } } -void HELPER(tcond)(CPUHPPAState *env, target_ureg cond) +void HELPER(tcond)(CPUHPPAState *env, target_ulong cond) { if (unlikely(cond)) { hppa_dynamic_excp(env, EXCP_COND, GETPC()); } } -static void atomic_store_3(CPUHPPAState *env, target_ulong addr, - uint32_t val, uintptr_t ra) +static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr, + uint32_t val, uint32_t mask, uintptr_t ra) { int 
mmu_idx = cpu_mmu_index(env, 0);
-    uint32_t old, new, cmp, mask, *haddr;
+    uint32_t old, new, cmp, *haddr;
     void *vaddr;

     vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
@@ -81,7 +81,36 @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr,
     }
 }

-static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
+static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
+                                uint64_t val, uint64_t mask,
+                                int size, uintptr_t ra)
+{
+#ifdef CONFIG_ATOMIC64
+    int mmu_idx = cpu_mmu_index(env, 0);
+    uint64_t old, new, cmp, *haddr;
+    void *vaddr;
+
+    vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
+    if (vaddr == NULL) {
+        cpu_loop_exit_atomic(env_cpu(env), ra);
+    }
+    haddr = (uint64_t *)((uintptr_t)vaddr & -8);
+
+    old = *haddr;
+    while (1) {
+        new = be64_to_cpu((cpu_to_be64(old) & ~mask) | (val & mask));
+        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
+        if (cmp == old) {
+            return;
+        }
+        old = cmp;
+    }
+#else
+    cpu_loop_exit_atomic(env_cpu(env), ra);
+#endif
+}
+
+static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
                       bool parallel, uintptr_t ra)
 {
     switch (addr & 3) {
@@ -94,7 +123,7 @@ static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
     case 1:
         /* The 3 byte store must appear atomic. */
         if (parallel) {
-            atomic_store_3(env, addr, val, ra);
+            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
         } else {
             cpu_stb_data_ra(env, addr, val >> 16, ra);
             cpu_stw_data_ra(env, addr + 1, val, ra);
@@ -106,25 +135,153 @@ static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
     }
 }

-void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ureg val)
+static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
+                       bool parallel, uintptr_t ra)
+{
+    switch (addr & 7) {
+    case 7:
+        cpu_stb_data_ra(env, addr, val, ra);
+        break;
+    case 6:
+        cpu_stw_data_ra(env, addr, val, ra);
+        break;
+    case 5:
+        /* The 3 byte store must appear atomic. */
+        if (parallel) {
+            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
+        } else {
+            cpu_stb_data_ra(env, addr, val >> 16, ra);
+            cpu_stw_data_ra(env, addr + 1, val, ra);
+        }
+        break;
+    case 4:
+        cpu_stl_data_ra(env, addr, val, ra);
+        break;
+    case 3:
+        /* The 5 byte store must appear atomic. */
+        if (parallel) {
+            atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
+        } else {
+            cpu_stb_data_ra(env, addr, val >> 32, ra);
+            cpu_stl_data_ra(env, addr + 1, val, ra);
+        }
+        break;
+    case 2:
+        /* The 6 byte store must appear atomic. */
+        if (parallel) {
+            atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
+        } else {
+            cpu_stw_data_ra(env, addr, val >> 32, ra);
+            cpu_stl_data_ra(env, addr + 2, val, ra);
+        }
+        break;
+    case 1:
+        /* The 7 byte store must appear atomic.
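* In big-endian byte-lane terms the seven least significant bytes
 * of the doubleword are merged in, hence the 0x00ffffffffffffff
 * mask passed to atomic_store_mask64() below.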
*/ + if (parallel) { + atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra); + } else { + cpu_stb_data_ra(env, addr, val >> 48, ra); + cpu_stw_data_ra(env, addr + 1, val >> 32, ra); + cpu_stl_data_ra(env, addr + 3, val, ra); + } + break; + default: + cpu_stq_data_ra(env, addr, val, ra); + break; + } +} + +void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val) { do_stby_b(env, addr, val, false, GETPC()); } void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr, - target_ureg val) + target_ulong val) { do_stby_b(env, addr, val, true, GETPC()); } -static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val, +void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val) +{ + do_stdby_b(env, addr, val, false, GETPC()); +} + +void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr, + target_ulong val) +{ + do_stdby_b(env, addr, val, true, GETPC()); +} + +static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val, bool parallel, uintptr_t ra) { switch (addr & 3) { case 3: /* The 3 byte store must appear atomic. */ if (parallel) { - atomic_store_3(env, addr - 3, val, ra); + atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra); + } else { + cpu_stw_data_ra(env, addr - 3, val >> 16, ra); + cpu_stb_data_ra(env, addr - 1, val >> 8, ra); + } + break; + case 2: + cpu_stw_data_ra(env, addr - 2, val >> 16, ra); + break; + case 1: + cpu_stb_data_ra(env, addr - 1, val >> 24, ra); + break; + default: + /* Nothing is stored, but protection is checked and the + cacheline is marked dirty. */ + probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra); + break; + } +} + +static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val, + bool parallel, uintptr_t ra) +{ + switch (addr & 7) { + case 7: + /* The 7 byte store must appear atomic. */ + if (parallel) { + atomic_store_mask64(env, addr - 7, val, + 0xffffffffffffff00ull, 7, ra); + } else { + cpu_stl_data_ra(env, addr - 7, val >> 32, ra); + cpu_stw_data_ra(env, addr - 3, val >> 16, ra); + cpu_stb_data_ra(env, addr - 1, val >> 8, ra); + } + break; + case 6: + /* The 6 byte store must appear atomic. */ + if (parallel) { + atomic_store_mask64(env, addr - 6, val, + 0xffffffffffff0000ull, 6, ra); + } else { + cpu_stl_data_ra(env, addr - 6, val >> 32, ra); + cpu_stw_data_ra(env, addr - 2, val >> 16, ra); + } + break; + case 5: + /* The 5 byte store must appear atomic. */ + if (parallel) { + atomic_store_mask64(env, addr - 5, val, + 0xffffffffff000000ull, 5, ra); + } else { + cpu_stl_data_ra(env, addr - 5, val >> 32, ra); + cpu_stb_data_ra(env, addr - 1, val >> 24, ra); + } + break; + case 4: + cpu_stl_data_ra(env, addr - 4, val >> 32, ra); + break; + case 3: + /* The 3 byte store must appear atomic. 
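* The three bytes all fall within one aligned word, so the 32-bit
 * compare-and-swap helper suffices even on this 64-bit path.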
*/ + if (parallel) { + atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra); } else { cpu_stw_data_ra(env, addr - 3, val >> 16, ra); cpu_stb_data_ra(env, addr - 1, val >> 8, ra); @@ -144,17 +301,28 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val, } } -void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ureg val) +void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val) { do_stby_e(env, addr, val, false, GETPC()); } void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr, - target_ureg val) + target_ulong val) { do_stby_e(env, addr, val, true, GETPC()); } +void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val) +{ + do_stdby_e(env, addr, val, false, GETPC()); +} + +void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr, + target_ulong val) +{ + do_stdby_e(env, addr, val, true, GETPC()); +} + void HELPER(ldc_check)(target_ulong addr) { if (unlikely(addr & 0xf)) { @@ -164,7 +332,7 @@ void HELPER(ldc_check)(target_ulong addr) } } -target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr, +target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr, uint32_t level, uint32_t want) { #ifdef CONFIG_USER_ONLY @@ -196,7 +364,7 @@ target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr, #endif } -target_ureg HELPER(read_interval_timer)(void) +target_ulong HELPER(read_interval_timer)(void) { #ifdef CONFIG_USER_ONLY /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. @@ -209,3 +377,113 @@ target_ureg HELPER(read_interval_timer)(void) return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2; #endif } + +uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2) +{ + uint64_t ret = 0; + + for (int i = 0; i < 64; i += 16) { + int f1 = sextract64(r1, i, 16); + int f2 = sextract64(r2, i, 16); + int fr = f1 + f2; + + fr = MIN(fr, INT16_MAX); + fr = MAX(fr, INT16_MIN); + ret = deposit64(ret, i, 16, fr); + } + return ret; +} + +uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2) +{ + uint64_t ret = 0; + + for (int i = 0; i < 64; i += 16) { + int f1 = extract64(r1, i, 16); + int f2 = sextract64(r2, i, 16); + int fr = f1 + f2; + + fr = MIN(fr, UINT16_MAX); + fr = MAX(fr, 0); + ret = deposit64(ret, i, 16, fr); + } + return ret; +} + +uint64_t HELPER(havg)(uint64_t r1, uint64_t r2) +{ + uint64_t ret = 0; + + for (int i = 0; i < 64; i += 16) { + int f1 = extract64(r1, i, 16); + int f2 = extract64(r2, i, 16); + int fr = f1 + f2; + + ret = deposit64(ret, i, 16, (fr >> 1) | (fr & 1)); + } + return ret; +} + +uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2) +{ + uint64_t ret = 0; + + for (int i = 0; i < 64; i += 16) { + int f1 = sextract64(r1, i, 16); + int f2 = sextract64(r2, i, 16); + int fr = f1 - f2; + + fr = MIN(fr, INT16_MAX); + fr = MAX(fr, INT16_MIN); + ret = deposit64(ret, i, 16, fr); + } + return ret; +} + +uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2) +{ + uint64_t ret = 0; + + for (int i = 0; i < 64; i += 16) { + int f1 = extract64(r1, i, 16); + int f2 = sextract64(r2, i, 16); + int fr = f1 - f2; + + fr = MIN(fr, UINT16_MAX); + fr = MAX(fr, 0); + ret = deposit64(ret, i, 16, fr); + } + return ret; +} + +uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh) +{ + uint64_t ret = 0; + + for (int i = 0; i < 64; i += 16) { + int f1 = sextract64(r1, i, 16); + int f2 = sextract64(r2, i, 16); + int fr = (f1 << sh) + f2; + + fr = MIN(fr, INT16_MAX); + fr = MAX(fr, INT16_MIN); + ret = deposit64(ret, i, 16, fr); + } + return ret; +} + +uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t 
sh) +{ + uint64_t ret = 0; + + for (int i = 0; i < 64; i += 16) { + int f1 = sextract64(r1, i, 16); + int f2 = sextract64(r2, i, 16); + int fr = (f1 >> sh) + f2; + + fr = MIN(fr, INT16_MAX); + fr = MAX(fr, INT16_MIN); + ret = deposit64(ret, i, 16, fr); + } + return ret; +} diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c index 4bb4cf611c..a59245eed3 100644 --- a/target/hppa/sys_helper.c +++ b/target/hppa/sys_helper.c @@ -24,7 +24,7 @@ #include "qemu/timer.h" #include "sysemu/runstate.h" -void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val) +void HELPER(write_interval_timer)(CPUHPPAState *env, target_ulong val) { HPPACPU *cpu = env_archcpu(env); uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); @@ -58,7 +58,7 @@ void HELPER(reset)(CPUHPPAState *env) helper_excp(env, EXCP_HLT); } -target_ureg HELPER(swap_system_mask)(CPUHPPAState *env, target_ureg nsm) +target_ulong HELPER(swap_system_mask)(CPUHPPAState *env, target_ulong nsm) { target_ulong psw = env->psw; /* @@ -80,6 +80,16 @@ void HELPER(rfi)(CPUHPPAState *env) env->iasq_b = (uint64_t)env->cr_back[0] << 32; env->iaoq_f = env->cr[CR_IIAOQ]; env->iaoq_b = env->cr_back[1]; + + /* + * For pa2.0, IIASQ is the top bits of the virtual address. + * To recreate the space identifier, remove the offset bits. + */ + if (hppa_is_pa20(env)) { + env->iasq_f &= ~env->iaoq_f; + env->iasq_b &= ~env->iaoq_b; + } + cpu_hppa_put_psw(env, env->cr[CR_IPSW]); } diff --git a/target/hppa/trace-events b/target/hppa/trace-events index 8931517890..a10ba73d5d 100644 --- a/target/hppa/trace-events +++ b/target/hppa/trace-events @@ -10,6 +10,7 @@ disable hppa_tlb_fill_success(void *env, uint64_t addr, uint64_t phys, int size, disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx" disable hppa_tlb_itlbp(void *env, void *ent, int access_id, int u, int pl2, int pl1, int type, int b, int d, int t) "env=%p ent=%p access_id=%x u=%d pl2=%d pl1=%d type=%d b=%d d=%d t=%d" disable hppa_tlb_ptlb(void *env) "env=%p" +disable hppa_tlb_ptlb_local(void *env) "env=%p" disable hppa_tlb_ptlbe(void *env) "env=%p" disable hppa_tlb_lpa_success(void *env, uint64_t addr, uint64_t phys) "env=%p addr=0x%lx phys=0x%lx" disable hppa_tlb_lpa_failed(void *env, uint64_t addr) "env=%p addr=0x%lx" diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 9f3ba9f42f..bcce65d587 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -23,6 +23,7 @@ #include "qemu/host-utils.h" #include "exec/exec-all.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" @@ -32,240 +33,35 @@ #include "exec/helper-info.c.inc" #undef HELPER_H - -/* Since we have a distinction between register size and address size, - we need to redefine all of these. */ - -#undef TCGv +/* Choose to use explicit sizes within this file. 
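* With the register file now unconditionally 64-bit, the old
 * i32/i64 "reg" aliasing layer below is deleted rather than ported.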
*/ #undef tcg_temp_new -#undef tcg_global_mem_new - -#if TARGET_LONG_BITS == 64 -#define TCGv_tl TCGv_i64 -#define tcg_temp_new_tl tcg_temp_new_i64 -#if TARGET_REGISTER_BITS == 64 -#define tcg_gen_extu_reg_tl tcg_gen_mov_i64 -#else -#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64 -#endif -#else -#define TCGv_tl TCGv_i32 -#define tcg_temp_new_tl tcg_temp_new_i32 -#define tcg_gen_extu_reg_tl tcg_gen_mov_i32 -#endif - -#if TARGET_REGISTER_BITS == 64 -#define TCGv_reg TCGv_i64 - -#define tcg_temp_new tcg_temp_new_i64 -#define tcg_global_mem_new tcg_global_mem_new_i64 - -#define tcg_gen_movi_reg tcg_gen_movi_i64 -#define tcg_gen_mov_reg tcg_gen_mov_i64 -#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64 -#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64 -#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64 -#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64 -#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64 -#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64 -#define tcg_gen_ld_reg tcg_gen_ld_i64 -#define tcg_gen_st8_reg tcg_gen_st8_i64 -#define tcg_gen_st16_reg tcg_gen_st16_i64 -#define tcg_gen_st32_reg tcg_gen_st32_i64 -#define tcg_gen_st_reg tcg_gen_st_i64 -#define tcg_gen_add_reg tcg_gen_add_i64 -#define tcg_gen_addi_reg tcg_gen_addi_i64 -#define tcg_gen_sub_reg tcg_gen_sub_i64 -#define tcg_gen_neg_reg tcg_gen_neg_i64 -#define tcg_gen_subfi_reg tcg_gen_subfi_i64 -#define tcg_gen_subi_reg tcg_gen_subi_i64 -#define tcg_gen_and_reg tcg_gen_and_i64 -#define tcg_gen_andi_reg tcg_gen_andi_i64 -#define tcg_gen_or_reg tcg_gen_or_i64 -#define tcg_gen_ori_reg tcg_gen_ori_i64 -#define tcg_gen_xor_reg tcg_gen_xor_i64 -#define tcg_gen_xori_reg tcg_gen_xori_i64 -#define tcg_gen_not_reg tcg_gen_not_i64 -#define tcg_gen_shl_reg tcg_gen_shl_i64 -#define tcg_gen_shli_reg tcg_gen_shli_i64 -#define tcg_gen_shr_reg tcg_gen_shr_i64 -#define tcg_gen_shri_reg tcg_gen_shri_i64 -#define tcg_gen_sar_reg tcg_gen_sar_i64 -#define tcg_gen_sari_reg tcg_gen_sari_i64 -#define tcg_gen_brcond_reg tcg_gen_brcond_i64 -#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64 -#define tcg_gen_setcond_reg tcg_gen_setcond_i64 -#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64 -#define tcg_gen_mul_reg tcg_gen_mul_i64 -#define tcg_gen_muli_reg tcg_gen_muli_i64 -#define tcg_gen_div_reg tcg_gen_div_i64 -#define tcg_gen_rem_reg tcg_gen_rem_i64 -#define tcg_gen_divu_reg tcg_gen_divu_i64 -#define tcg_gen_remu_reg tcg_gen_remu_i64 -#define tcg_gen_discard_reg tcg_gen_discard_i64 -#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32 -#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64 -#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64 -#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64 -#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64 -#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64 -#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64 -#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64 -#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64 -#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64 -#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64 -#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64 -#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64 -#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64 -#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64 -#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64 -#define tcg_gen_andc_reg tcg_gen_andc_i64 -#define tcg_gen_eqv_reg tcg_gen_eqv_i64 -#define tcg_gen_nand_reg tcg_gen_nand_i64 -#define tcg_gen_nor_reg tcg_gen_nor_i64 -#define tcg_gen_orc_reg tcg_gen_orc_i64 -#define tcg_gen_clz_reg tcg_gen_clz_i64 -#define tcg_gen_ctz_reg tcg_gen_ctz_i64 -#define tcg_gen_clzi_reg tcg_gen_clzi_i64 -#define tcg_gen_ctzi_reg 
tcg_gen_ctzi_i64 -#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64 -#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64 -#define tcg_gen_rotl_reg tcg_gen_rotl_i64 -#define tcg_gen_rotli_reg tcg_gen_rotli_i64 -#define tcg_gen_rotr_reg tcg_gen_rotr_i64 -#define tcg_gen_rotri_reg tcg_gen_rotri_i64 -#define tcg_gen_deposit_reg tcg_gen_deposit_i64 -#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64 -#define tcg_gen_extract_reg tcg_gen_extract_i64 -#define tcg_gen_sextract_reg tcg_gen_sextract_i64 -#define tcg_gen_extract2_reg tcg_gen_extract2_i64 -#define tcg_constant_reg tcg_constant_i64 -#define tcg_gen_movcond_reg tcg_gen_movcond_i64 -#define tcg_gen_add2_reg tcg_gen_add2_i64 -#define tcg_gen_sub2_reg tcg_gen_sub2_i64 -#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64 -#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64 -#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64 -#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr -#else -#define TCGv_reg TCGv_i32 -#define tcg_temp_new tcg_temp_new_i32 -#define tcg_global_mem_new tcg_global_mem_new_i32 - -#define tcg_gen_movi_reg tcg_gen_movi_i32 -#define tcg_gen_mov_reg tcg_gen_mov_i32 -#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32 -#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32 -#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32 -#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32 -#define tcg_gen_ld32u_reg tcg_gen_ld_i32 -#define tcg_gen_ld32s_reg tcg_gen_ld_i32 -#define tcg_gen_ld_reg tcg_gen_ld_i32 -#define tcg_gen_st8_reg tcg_gen_st8_i32 -#define tcg_gen_st16_reg tcg_gen_st16_i32 -#define tcg_gen_st32_reg tcg_gen_st32_i32 -#define tcg_gen_st_reg tcg_gen_st_i32 -#define tcg_gen_add_reg tcg_gen_add_i32 -#define tcg_gen_addi_reg tcg_gen_addi_i32 -#define tcg_gen_sub_reg tcg_gen_sub_i32 -#define tcg_gen_neg_reg tcg_gen_neg_i32 -#define tcg_gen_subfi_reg tcg_gen_subfi_i32 -#define tcg_gen_subi_reg tcg_gen_subi_i32 -#define tcg_gen_and_reg tcg_gen_and_i32 -#define tcg_gen_andi_reg tcg_gen_andi_i32 -#define tcg_gen_or_reg tcg_gen_or_i32 -#define tcg_gen_ori_reg tcg_gen_ori_i32 -#define tcg_gen_xor_reg tcg_gen_xor_i32 -#define tcg_gen_xori_reg tcg_gen_xori_i32 -#define tcg_gen_not_reg tcg_gen_not_i32 -#define tcg_gen_shl_reg tcg_gen_shl_i32 -#define tcg_gen_shli_reg tcg_gen_shli_i32 -#define tcg_gen_shr_reg tcg_gen_shr_i32 -#define tcg_gen_shri_reg tcg_gen_shri_i32 -#define tcg_gen_sar_reg tcg_gen_sar_i32 -#define tcg_gen_sari_reg tcg_gen_sari_i32 -#define tcg_gen_brcond_reg tcg_gen_brcond_i32 -#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32 -#define tcg_gen_setcond_reg tcg_gen_setcond_i32 -#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32 -#define tcg_gen_mul_reg tcg_gen_mul_i32 -#define tcg_gen_muli_reg tcg_gen_muli_i32 -#define tcg_gen_div_reg tcg_gen_div_i32 -#define tcg_gen_rem_reg tcg_gen_rem_i32 -#define tcg_gen_divu_reg tcg_gen_divu_i32 -#define tcg_gen_remu_reg tcg_gen_remu_i32 -#define tcg_gen_discard_reg tcg_gen_discard_i32 -#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32 -#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32 -#define tcg_gen_extu_i32_reg tcg_gen_mov_i32 -#define tcg_gen_ext_i32_reg tcg_gen_mov_i32 -#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64 -#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64 -#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32 -#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32 -#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32 -#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32 -#define tcg_gen_ext32u_reg tcg_gen_mov_i32 -#define tcg_gen_ext32s_reg tcg_gen_mov_i32 -#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32 -#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32 
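
Both halves of this deleted table did the same thing: alias every width-generic *_reg opcode to its _i64 or _i32 implementation at preprocessor time, keyed on TARGET_REGISTER_BITS. With the two register widths now handled in one binary, the translator always emits 64-bit ops and narrows explicitly, roughly like this (sketch, not taken from the patch):

/* Previously:  #define tcg_gen_add_reg  tcg_gen_add_i64   (or _i32) */
/* Now: always emit the 64-bit op ...                                */
tcg_gen_add_i64(dest, in1, in2);
/* ... and zero/sign-extend only at the few points that observe
 * 32-bit semantics on pa1.x, e.g. cond_need_ext() further down.     */
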
-#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64 -#define tcg_gen_andc_reg tcg_gen_andc_i32 -#define tcg_gen_eqv_reg tcg_gen_eqv_i32 -#define tcg_gen_nand_reg tcg_gen_nand_i32 -#define tcg_gen_nor_reg tcg_gen_nor_i32 -#define tcg_gen_orc_reg tcg_gen_orc_i32 -#define tcg_gen_clz_reg tcg_gen_clz_i32 -#define tcg_gen_ctz_reg tcg_gen_ctz_i32 -#define tcg_gen_clzi_reg tcg_gen_clzi_i32 -#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32 -#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32 -#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32 -#define tcg_gen_rotl_reg tcg_gen_rotl_i32 -#define tcg_gen_rotli_reg tcg_gen_rotli_i32 -#define tcg_gen_rotr_reg tcg_gen_rotr_i32 -#define tcg_gen_rotri_reg tcg_gen_rotri_i32 -#define tcg_gen_deposit_reg tcg_gen_deposit_i32 -#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32 -#define tcg_gen_extract_reg tcg_gen_extract_i32 -#define tcg_gen_sextract_reg tcg_gen_sextract_i32 -#define tcg_gen_extract2_reg tcg_gen_extract2_i32 -#define tcg_constant_reg tcg_constant_i32 -#define tcg_gen_movcond_reg tcg_gen_movcond_i32 -#define tcg_gen_add2_reg tcg_gen_add2_i32 -#define tcg_gen_sub2_reg tcg_gen_sub2_i32 -#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32 -#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32 -#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32 -#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr -#endif /* TARGET_REGISTER_BITS */ typedef struct DisasCond { TCGCond c; - TCGv_reg a0, a1; + TCGv_i64 a0, a1; } DisasCond; typedef struct DisasContext { DisasContextBase base; CPUState *cs; + TCGOp *insn_start; - target_ureg iaoq_f; - target_ureg iaoq_b; - target_ureg iaoq_n; - TCGv_reg iaoq_n_var; - - int ntempr, ntempl; - TCGv_reg tempr[8]; - TCGv_tl templ[4]; + uint64_t iaoq_f; + uint64_t iaoq_b; + uint64_t iaoq_n; + TCGv_i64 iaoq_n_var; DisasCond null_cond; TCGLabel *null_lab; + TCGv_i64 zero; + uint32_t insn; uint32_t tb_flags; int mmu_idx; int privilege; bool psw_n_nonzero; + bool is_pa20; #ifdef CONFIG_USER_ONLY MemOp unalign; @@ -332,6 +128,23 @@ static int expand_shl11(DisasContext *ctx, int val) return val << 11; } +static int assemble_6(DisasContext *ctx, int val) +{ + /* + * Officially, 32 * x + 32 - y. + * Here, x is already in bit 5, and y is [4:0]. + * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1, + * with the overflow from bit 4 summing with x. + */ + return (val ^ 31) + 1; +} + +/* Translate CMPI doubleword conditions to standard. */ +static int cmpbid_c(DisasContext *ctx, int val) +{ + return val ? val : 4; /* 0 == "*<<" */ +} + /* Include the auto-generated decoder. 
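
The assemble_6 identity is easy to confirm exhaustively: with x in bit 5 and y in bits [4:0], the 5-bit complement-and-increment folds the carry out of bit 4 into x, exactly as the comment claims. A throwaway check (the field encoding here is the assumption stated in that comment, not new code from the patch):

#include <assert.h>

int main(void)
{
    for (int x = 0; x < 2; x++) {
        for (int y = 0; y < 32; y++) {
            int val = x << 5 | y;                    /* field as decoded */
            assert(((val ^ 31) + 1) == 32 * x + 32 - y);
        }
    }
    return 0;
}
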
*/ #include "decode-insns.c.inc" @@ -350,24 +163,24 @@ static int expand_shl11(DisasContext *ctx, int val) #define DISAS_EXIT DISAS_TARGET_3 /* global register indexes */ -static TCGv_reg cpu_gr[32]; +static TCGv_i64 cpu_gr[32]; static TCGv_i64 cpu_sr[4]; static TCGv_i64 cpu_srH; -static TCGv_reg cpu_iaoq_f; -static TCGv_reg cpu_iaoq_b; +static TCGv_i64 cpu_iaoq_f; +static TCGv_i64 cpu_iaoq_b; static TCGv_i64 cpu_iasq_f; static TCGv_i64 cpu_iasq_b; -static TCGv_reg cpu_sar; -static TCGv_reg cpu_psw_n; -static TCGv_reg cpu_psw_v; -static TCGv_reg cpu_psw_cb; -static TCGv_reg cpu_psw_cb_msb; +static TCGv_i64 cpu_sar; +static TCGv_i64 cpu_psw_n; +static TCGv_i64 cpu_psw_v; +static TCGv_i64 cpu_psw_cb; +static TCGv_i64 cpu_psw_cb_msb; void hppa_translate_init(void) { #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) } - typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar; + typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar; static const GlobalVar vars[] = { { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) }, DEF_VAR(psw_n), @@ -422,6 +235,13 @@ void hppa_translate_init(void) "iasq_b"); } +static void set_insn_breg(DisasContext *ctx, int breg) +{ + assert(ctx->insn_start != NULL); + tcg_set_insn_start_param(ctx->insn_start, 2, breg); + ctx->insn_start = NULL; +} + static DisasCond cond_make_f(void) { return (DisasCond){ @@ -445,36 +265,36 @@ static DisasCond cond_make_n(void) return (DisasCond){ .c = TCG_COND_NE, .a0 = cpu_psw_n, - .a1 = tcg_constant_reg(0) + .a1 = tcg_constant_i64(0) }; } -static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0) +static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1) { assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS); - return (DisasCond){ - .c = c, .a0 = a0, .a1 = tcg_constant_reg(0) - }; + return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 }; } -static DisasCond cond_make_0(TCGCond c, TCGv_reg a0) +static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0) { - TCGv_reg tmp = tcg_temp_new(); - tcg_gen_mov_reg(tmp, a0); - return cond_make_0_tmp(c, tmp); + return cond_make_tmp(c, a0, tcg_constant_i64(0)); } -static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1) +static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0) { - DisasCond r = { .c = c }; + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_mov_i64(tmp, a0); + return cond_make_0_tmp(c, tmp); +} - assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS); - r.a0 = tcg_temp_new(); - tcg_gen_mov_reg(r.a0, a0); - r.a1 = tcg_temp_new(); - tcg_gen_mov_reg(r.a1, a1); +static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1) +{ + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); - return r; + tcg_gen_mov_i64(t0, a0); + tcg_gen_mov_i64(t1, a1); + return cond_make_tmp(c, t0, t1); } static void cond_free(DisasCond *cond) @@ -492,60 +312,35 @@ static void cond_free(DisasCond *cond) } } -static TCGv_reg get_temp(DisasContext *ctx) -{ - unsigned i = ctx->ntempr++; - g_assert(i < ARRAY_SIZE(ctx->tempr)); - return ctx->tempr[i] = tcg_temp_new(); -} - -#ifndef CONFIG_USER_ONLY -static TCGv_tl get_temp_tl(DisasContext *ctx) -{ - unsigned i = ctx->ntempl++; - g_assert(i < ARRAY_SIZE(ctx->templ)); - return ctx->templ[i] = tcg_temp_new_tl(); -} -#endif - -static TCGv_reg load_const(DisasContext *ctx, target_sreg v) -{ - TCGv_reg t = get_temp(ctx); - tcg_gen_movi_reg(t, v); - return t; -} - -static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg) +static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg) { if (reg == 0) { - TCGv_reg t 
= get_temp(ctx); - tcg_gen_movi_reg(t, 0); - return t; + return ctx->zero; } else { return cpu_gr[reg]; } } -static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg) +static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg) { if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) { - return get_temp(ctx); + return tcg_temp_new_i64(); } else { return cpu_gr[reg]; } } -static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t) +static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t) { if (ctx->null_cond.c != TCG_COND_NEVER) { - tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0, + tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0, ctx->null_cond.a1, dest, t); } else { - tcg_gen_mov_reg(dest, t); + tcg_gen_mov_i64(dest, t); } } -static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t) +static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t) { if (reg != 0) { save_or_nullify(ctx, cpu_gr[reg], t); @@ -653,18 +448,18 @@ static void nullify_over(DisasContext *ctx) /* If we're using PSW[N], copy it to a temp because... */ if (ctx->null_cond.a0 == cpu_psw_n) { - ctx->null_cond.a0 = tcg_temp_new(); - tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n); + ctx->null_cond.a0 = tcg_temp_new_i64(); + tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n); } /* ... we clear it before branching over the implementation, so that (1) it's clear after nullifying this insn and (2) if this insn nullifies the next, PSW[N] is valid. */ if (ctx->psw_n_nonzero) { ctx->psw_n_nonzero = false; - tcg_gen_movi_reg(cpu_psw_n, 0); + tcg_gen_movi_i64(cpu_psw_n, 0); } - tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0, + tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0, ctx->null_cond.a1, ctx->null_lab); cond_free(&ctx->null_cond); } @@ -675,12 +470,12 @@ static void nullify_save(DisasContext *ctx) { if (ctx->null_cond.c == TCG_COND_NEVER) { if (ctx->psw_n_nonzero) { - tcg_gen_movi_reg(cpu_psw_n, 0); + tcg_gen_movi_i64(cpu_psw_n, 0); } return; } if (ctx->null_cond.a0 != cpu_psw_n) { - tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n, + tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n, ctx->null_cond.a0, ctx->null_cond.a1); ctx->psw_n_nonzero = true; } @@ -693,7 +488,7 @@ static void nullify_save(DisasContext *ctx) static void nullify_set(DisasContext *ctx, bool x) { if (ctx->psw_n_nonzero || x) { - tcg_gen_movi_reg(cpu_psw_n, x); + tcg_gen_movi_i64(cpu_psw_n, x); } } @@ -736,16 +531,36 @@ static bool nullify_end(DisasContext *ctx) return true; } -static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval) +static uint64_t gva_offset_mask(DisasContext *ctx) +{ + return (ctx->tb_flags & PSW_W + ? MAKE_64BIT_MASK(0, 62) + : MAKE_64BIT_MASK(0, 32)); +} + +static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest, + uint64_t ival, TCGv_i64 vval) { - if (unlikely(ival == -1)) { - tcg_gen_mov_reg(dest, vval); + uint64_t mask = gva_offset_mask(ctx); + + if (ival != -1) { + tcg_gen_movi_i64(dest, ival & mask); + return; + } + tcg_debug_assert(vval != NULL); + + /* + * We know that the IAOQ is already properly masked. + * This optimization is primarily for "iaoq_f = iaoq_b". 
+ */ + if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) { + tcg_gen_mov_i64(dest, vval); } else { - tcg_gen_movi_reg(dest, ival); + tcg_gen_andi_i64(dest, vval, mask); } } -static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp) +static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp) { return ctx->iaoq_f + disp + 8; } @@ -757,8 +572,8 @@ static void gen_excp_1(int exception) static void gen_excp(DisasContext *ctx, int exception) { - copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); - copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); + copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); + copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); nullify_save(ctx); gen_excp_1(exception); ctx->base.is_jmp = DISAS_NORETURN; @@ -767,7 +582,7 @@ static void gen_excp(DisasContext *ctx, int exception) static bool gen_excp_iir(DisasContext *ctx, int exc) { nullify_over(ctx); - tcg_gen_st_reg(tcg_constant_reg(ctx->insn), + tcg_gen_st_i64(tcg_constant_i64(ctx->insn), tcg_env, offsetof(CPUHPPAState, cr[CR_IIR])); gen_excp(ctx, exc); return nullify_end(ctx); @@ -790,7 +605,7 @@ static bool gen_illegal(DisasContext *ctx) } while (0) #endif -static bool use_goto_tb(DisasContext *ctx, target_ureg dest) +static bool use_goto_tb(DisasContext *ctx, uint64_t dest) { return translator_use_goto_tb(&ctx->base, dest); } @@ -806,16 +621,16 @@ static bool use_nullify_skip(DisasContext *ctx) } static void gen_goto_tb(DisasContext *ctx, int which, - target_ureg f, target_ureg b) + uint64_t f, uint64_t b) { if (f != -1 && b != -1 && use_goto_tb(ctx, f)) { tcg_gen_goto_tb(which); - tcg_gen_movi_reg(cpu_iaoq_f, f); - tcg_gen_movi_reg(cpu_iaoq_b, b); + copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL); + copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL); tcg_gen_exit_tb(ctx->base.tb, which); } else { - copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b); - copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b); + copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var); tcg_gen_lookup_and_goto_ptr(); } } @@ -830,27 +645,41 @@ static bool cond_need_cb(int c) return c == 4 || c == 5; } +/* Need extensions from TCGv_i32 to TCGv_i64. */ +static bool cond_need_ext(DisasContext *ctx, bool d) +{ + return !(ctx->is_pa20 && d); +} + /* * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of * the Parisc 1.1 Architecture Reference Manual for details. 
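
Every IAOQ write now funnels through the same masking rule: PSW_W selects a 62-bit offset space (the top two bits form the space identifier), otherwise offsets are 32-bit. The mask itself is cheap to picture (values illustrative, not from the patch):

uint64_t mask = psw_w ? (1ull << 62) - 1     /* MAKE_64BIT_MASK(0, 62) */
                      : (1ull << 32) - 1;    /* MAKE_64BIT_MASK(0, 32) */
uint64_t iaoq_f = ival & mask;   /* constants get masked at translate time */
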
*/ -static DisasCond do_cond(unsigned cf, TCGv_reg res, - TCGv_reg cb_msb, TCGv_reg sv) +static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d, + TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv) { DisasCond cond; - TCGv_reg tmp; + TCGv_i64 tmp; switch (cf >> 1) { case 0: /* Never / TR (0 / 1) */ cond = cond_make_f(); break; case 1: /* = / <> (Z / !Z) */ + if (cond_need_ext(ctx, d)) { + tmp = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(tmp, res); + res = tmp; + } cond = cond_make_0(TCG_COND_EQ, res); break; case 2: /* < / >= (N ^ V / !(N ^ V) */ - tmp = tcg_temp_new(); - tcg_gen_xor_reg(tmp, res, sv); + tmp = tcg_temp_new_i64(); + tcg_gen_xor_i64(tmp, res, sv); + if (cond_need_ext(ctx, d)) { + tcg_gen_ext32s_i64(tmp, tmp); + } cond = cond_make_0_tmp(TCG_COND_LT, tmp); break; case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */ @@ -863,27 +692,42 @@ static DisasCond do_cond(unsigned cf, TCGv_reg res, * !(~(res ^ sv) >> 31) | !res * !(~(res ^ sv) >> 31 & res) */ - tmp = tcg_temp_new(); - tcg_gen_eqv_reg(tmp, res, sv); - tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); - tcg_gen_and_reg(tmp, tmp, res); + tmp = tcg_temp_new_i64(); + tcg_gen_eqv_i64(tmp, res, sv); + if (cond_need_ext(ctx, d)) { + tcg_gen_sextract_i64(tmp, tmp, 31, 1); + tcg_gen_and_i64(tmp, tmp, res); + tcg_gen_ext32u_i64(tmp, tmp); + } else { + tcg_gen_sari_i64(tmp, tmp, 63); + tcg_gen_and_i64(tmp, tmp, res); + } cond = cond_make_0_tmp(TCG_COND_EQ, tmp); break; case 4: /* NUV / UV (!C / C) */ + /* Only bit 0 of cb_msb is ever set. */ cond = cond_make_0(TCG_COND_EQ, cb_msb); break; case 5: /* ZNV / VNZ (!C | Z / C & !Z) */ - tmp = tcg_temp_new(); - tcg_gen_neg_reg(tmp, cb_msb); - tcg_gen_and_reg(tmp, tmp, res); + tmp = tcg_temp_new_i64(); + tcg_gen_neg_i64(tmp, cb_msb); + tcg_gen_and_i64(tmp, tmp, res); + if (cond_need_ext(ctx, d)) { + tcg_gen_ext32u_i64(tmp, tmp); + } cond = cond_make_0_tmp(TCG_COND_EQ, tmp); break; case 6: /* SV / NSV (V / !V) */ + if (cond_need_ext(ctx, d)) { + tmp = tcg_temp_new_i64(); + tcg_gen_ext32s_i64(tmp, sv); + sv = tmp; + } cond = cond_make_0(TCG_COND_LT, sv); break; case 7: /* OD / EV */ - tmp = tcg_temp_new(); - tcg_gen_andi_reg(tmp, res, 1); + tmp = tcg_temp_new_i64(); + tcg_gen_andi_i64(tmp, res, 1); cond = cond_make_0_tmp(TCG_COND_NE, tmp); break; default: @@ -900,35 +744,55 @@ static DisasCond do_cond(unsigned cf, TCGv_reg res, can use the inputs directly. This can allow other computation to be deleted as unused. 
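
The new cond_need_ext() guard exists because a 32-bit operation now leaves its result in a 64-bit temporary, so bits 63:32 may hold carry/sign residue that a full-width compare would see. A concrete case with hypothetical values:

uint64_t res = 0xdeadbeef00000000ull;   /* narrow op, low half is zero  */
int eq_narrow = (uint32_t)res == 0;     /* 1 — ext32u before comparing  */
int eq_wide   = (res == 0);             /* 0 — wrong without the extend */
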
*/ -static DisasCond do_sub_cond(unsigned cf, TCGv_reg res, - TCGv_reg in1, TCGv_reg in2, TCGv_reg sv) +static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d, + TCGv_i64 res, TCGv_i64 in1, + TCGv_i64 in2, TCGv_i64 sv) { - DisasCond cond; + TCGCond tc; + bool ext_uns; switch (cf >> 1) { case 1: /* = / <> */ - cond = cond_make(TCG_COND_EQ, in1, in2); + tc = TCG_COND_EQ; + ext_uns = true; break; case 2: /* < / >= */ - cond = cond_make(TCG_COND_LT, in1, in2); + tc = TCG_COND_LT; + ext_uns = false; break; case 3: /* <= / > */ - cond = cond_make(TCG_COND_LE, in1, in2); + tc = TCG_COND_LE; + ext_uns = false; break; case 4: /* << / >>= */ - cond = cond_make(TCG_COND_LTU, in1, in2); + tc = TCG_COND_LTU; + ext_uns = true; break; case 5: /* <<= / >> */ - cond = cond_make(TCG_COND_LEU, in1, in2); + tc = TCG_COND_LEU; + ext_uns = true; break; default: - return do_cond(cf, res, NULL, sv); + return do_cond(ctx, cf, d, res, NULL, sv); } + if (cf & 1) { - cond.c = tcg_invert_cond(cond.c); + tc = tcg_invert_cond(tc); } + if (cond_need_ext(ctx, d)) { + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); - return cond; + if (ext_uns) { + tcg_gen_ext32u_i64(t1, in1); + tcg_gen_ext32u_i64(t2, in2); + } else { + tcg_gen_ext32s_i64(t1, in1); + tcg_gen_ext32s_i64(t2, in2); + } + return cond_make_tmp(tc, t1, t2); + } + return cond_make(tc, in1, in2); } /* @@ -940,8 +804,12 @@ static DisasCond do_sub_cond(unsigned cf, TCGv_reg res, * how cases c={2,3} are treated. */ -static DisasCond do_log_cond(unsigned cf, TCGv_reg res) +static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d, + TCGv_i64 res) { + TCGCond tc; + bool ext_uns; + switch (cf) { case 0: /* never */ case 9: /* undef, C */ @@ -956,30 +824,55 @@ static DisasCond do_log_cond(unsigned cf, TCGv_reg res) return cond_make_t(); case 2: /* == */ - return cond_make_0(TCG_COND_EQ, res); + tc = TCG_COND_EQ; + ext_uns = true; + break; case 3: /* <> */ - return cond_make_0(TCG_COND_NE, res); + tc = TCG_COND_NE; + ext_uns = true; + break; case 4: /* < */ - return cond_make_0(TCG_COND_LT, res); + tc = TCG_COND_LT; + ext_uns = false; + break; case 5: /* >= */ - return cond_make_0(TCG_COND_GE, res); + tc = TCG_COND_GE; + ext_uns = false; + break; case 6: /* <= */ - return cond_make_0(TCG_COND_LE, res); + tc = TCG_COND_LE; + ext_uns = false; + break; case 7: /* > */ - return cond_make_0(TCG_COND_GT, res); + tc = TCG_COND_GT; + ext_uns = false; + break; case 14: /* OD */ case 15: /* EV */ - return do_cond(cf, res, NULL, NULL); + return do_cond(ctx, cf, d, res, NULL, NULL); default: g_assert_not_reached(); } + + if (cond_need_ext(ctx, d)) { + TCGv_i64 tmp = tcg_temp_new_i64(); + + if (ext_uns) { + tcg_gen_ext32u_i64(tmp, res); + } else { + tcg_gen_ext32s_i64(tmp, res); + } + return cond_make_0_tmp(tc, tmp); + } + return cond_make_0(tc, res); } /* Similar, but for shift/extract/deposit conditions. */ -static DisasCond do_sed_cond(unsigned orig, TCGv_reg res) +static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d, + TCGv_i64 res) { unsigned c, f; @@ -992,28 +885,29 @@ static DisasCond do_sed_cond(unsigned orig, TCGv_reg res) } f = (orig & 4) / 4; - return do_log_cond(c * 2 + f, res); + return do_log_cond(ctx, c * 2 + f, d, res); } /* Similar, but for unit conditions. 
*/ -static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, - TCGv_reg in1, TCGv_reg in2) +static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res, + TCGv_i64 in1, TCGv_i64 in2) { DisasCond cond; - TCGv_reg tmp, cb = NULL; + TCGv_i64 tmp, cb = NULL; + uint64_t d_repl = d ? 0x0000000100000001ull : 1; if (cf & 8) { /* Since we want to test lots of carry-out bits all at once, do not * do our normal thing and compute carry-in of bit B+1 since that * leaves us with carry bits spread across two words. */ - cb = tcg_temp_new(); - tmp = tcg_temp_new(); - tcg_gen_or_reg(cb, in1, in2); - tcg_gen_and_reg(tmp, in1, in2); - tcg_gen_andc_reg(cb, cb, res); - tcg_gen_or_reg(cb, cb, tmp); + cb = tcg_temp_new_i64(); + tmp = tcg_temp_new_i64(); + tcg_gen_or_i64(cb, in1, in2); + tcg_gen_and_i64(tmp, in1, in2); + tcg_gen_andc_i64(cb, cb, res); + tcg_gen_or_i64(cb, cb, tmp); } switch (cf >> 1) { @@ -1027,33 +921,33 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, /* See hasless(v,1) from * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord */ - tmp = tcg_temp_new(); - tcg_gen_subi_reg(tmp, res, 0x01010101u); - tcg_gen_andc_reg(tmp, tmp, res); - tcg_gen_andi_reg(tmp, tmp, 0x80808080u); + tmp = tcg_temp_new_i64(); + tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u); + tcg_gen_andc_i64(tmp, tmp, res); + tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u); cond = cond_make_0(TCG_COND_NE, tmp); break; case 3: /* SHZ / NHZ */ - tmp = tcg_temp_new(); - tcg_gen_subi_reg(tmp, res, 0x00010001u); - tcg_gen_andc_reg(tmp, tmp, res); - tcg_gen_andi_reg(tmp, tmp, 0x80008000u); + tmp = tcg_temp_new_i64(); + tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u); + tcg_gen_andc_i64(tmp, tmp, res); + tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u); cond = cond_make_0(TCG_COND_NE, tmp); break; case 4: /* SDC / NDC */ - tcg_gen_andi_reg(cb, cb, 0x88888888u); + tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u); cond = cond_make_0(TCG_COND_NE, cb); break; case 6: /* SBC / NBC */ - tcg_gen_andi_reg(cb, cb, 0x80808080u); + tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u); cond = cond_make_0(TCG_COND_NE, cb); break; case 7: /* SHC / NHC */ - tcg_gen_andi_reg(cb, cb, 0x80008000u); + tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u); cond = cond_make_0(TCG_COND_NE, cb); break; @@ -1067,68 +961,87 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, return cond; } +static TCGv_i64 get_carry(DisasContext *ctx, bool d, + TCGv_i64 cb, TCGv_i64 cb_msb) +{ + if (cond_need_ext(ctx, d)) { + TCGv_i64 t = tcg_temp_new_i64(); + tcg_gen_extract_i64(t, cb, 32, 1); + return t; + } + return cb_msb; +} + +static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d) +{ + return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb); +} + /* Compute signed overflow for addition. */ -static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res, - TCGv_reg in1, TCGv_reg in2) +static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res, + TCGv_i64 in1, TCGv_i64 in2) { - TCGv_reg sv = get_temp(ctx); - TCGv_reg tmp = tcg_temp_new(); + TCGv_i64 sv = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_new_i64(); - tcg_gen_xor_reg(sv, res, in1); - tcg_gen_xor_reg(tmp, in1, in2); - tcg_gen_andc_reg(sv, sv, tmp); + tcg_gen_xor_i64(sv, res, in1); + tcg_gen_xor_i64(tmp, in1, in2); + tcg_gen_andc_i64(sv, sv, tmp); return sv; } /* Compute signed overflow for subtraction. 
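
The SBZ/NBZ case above is hasless(v, 1) from the cited bithacks page, and d_repl merely replicates the 32-bit magic constants into both halves for the doubleword form. The detector on its own (standalone sketch, not part of the patch):

#include <stdint.h>

int has_zero_byte(uint64_t v, int d)
{
    uint64_t repl = d ? 0x0000000100000001ull : 1;
    uint64_t t = (v - repl * 0x01010101u) & ~v & (repl * 0x80808080u);
    return t != 0;   /* with d=0, the mask confines the test to the low word */
}

/* has_zero_byte(0x11221144556600ffull, 1) != 0: the second byte is zero. */
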
*/ -static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res, - TCGv_reg in1, TCGv_reg in2) +static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res, + TCGv_i64 in1, TCGv_i64 in2) { - TCGv_reg sv = get_temp(ctx); - TCGv_reg tmp = tcg_temp_new(); + TCGv_i64 sv = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_new_i64(); - tcg_gen_xor_reg(sv, res, in1); - tcg_gen_xor_reg(tmp, in1, in2); - tcg_gen_and_reg(sv, sv, tmp); + tcg_gen_xor_i64(sv, res, in1); + tcg_gen_xor_i64(tmp, in1, in2); + tcg_gen_and_i64(sv, sv, tmp); return sv; } -static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, - TCGv_reg in2, unsigned shift, bool is_l, - bool is_tsv, bool is_tc, bool is_c, unsigned cf) +static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1, + TCGv_i64 in2, unsigned shift, bool is_l, + bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d) { - TCGv_reg dest, cb, cb_msb, sv, tmp; + TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp; unsigned c = cf >> 1; DisasCond cond; - dest = tcg_temp_new(); + dest = tcg_temp_new_i64(); cb = NULL; cb_msb = NULL; + cb_cond = NULL; if (shift) { - tmp = get_temp(ctx); - tcg_gen_shli_reg(tmp, in1, shift); + tmp = tcg_temp_new_i64(); + tcg_gen_shli_i64(tmp, in1, shift); in1 = tmp; } if (!is_l || cond_need_cb(c)) { - TCGv_reg zero = tcg_constant_reg(0); - cb_msb = get_temp(ctx); - tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero); + cb_msb = tcg_temp_new_i64(); + cb = tcg_temp_new_i64(); + + tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero); if (is_c) { - tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero); + tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, + get_psw_carry(ctx, d), ctx->zero); } - if (!is_l) { - cb = get_temp(ctx); - tcg_gen_xor_reg(cb, in1, in2); - tcg_gen_xor_reg(cb, cb, dest); + tcg_gen_xor_i64(cb, in1, in2); + tcg_gen_xor_i64(cb, cb, dest); + if (cond_need_cb(c)) { + cb_cond = get_carry(ctx, d, cb, cb_msb); } } else { - tcg_gen_add_reg(dest, in1, in2); + tcg_gen_add_i64(dest, in1, in2); if (is_c) { - tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb); + tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d)); } } @@ -1143,10 +1056,10 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, } /* Emit any conditional trap before any writeback. 
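
Both overflow helpers are the classic sign rule: addition overflows when the inputs agree in sign and the result does not; subtraction when the inputs disagree and the result's sign differs from in1. The sign bit (63, or 31 for narrow conditions) of sv is the V flag. In plain C (sketch):

#include <stdint.h>

uint64_t add_sv(uint64_t res, uint64_t in1, uint64_t in2)
{
    return (res ^ in1) & ~(in1 ^ in2);  /* same-sign inputs, flipped result  */
}

uint64_t sub_sv(uint64_t res, uint64_t in1, uint64_t in2)
{
    return (res ^ in1) & (in1 ^ in2);   /* mixed-sign inputs, flipped result */
}

/* e.g. add_sv(INT64_MAX + 1ull, INT64_MAX, 1) has bit 63 set. */
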
*/ - cond = do_cond(cf, dest, cb_msb, sv); + cond = do_cond(ctx, cf, d, dest, cb_cond, sv); if (is_tc) { - tmp = tcg_temp_new(); - tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); + tmp = tcg_temp_new_i64(); + tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(tcg_env, tmp); } @@ -1162,61 +1075,65 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, ctx->null_cond = cond; } -static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a, +static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a, bool is_l, bool is_tsv, bool is_tc, bool is_c) { - TCGv_reg tcg_r1, tcg_r2; + TCGv_i64 tcg_r1, tcg_r2; if (a->cf) { nullify_over(ctx); } tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); - do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf); + do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, + is_tsv, is_tc, is_c, a->cf, a->d); return nullify_end(ctx); } static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv, bool is_tc) { - TCGv_reg tcg_im, tcg_r2; + TCGv_i64 tcg_im, tcg_r2; if (a->cf) { nullify_over(ctx); } - tcg_im = load_const(ctx, a->i); + tcg_im = tcg_constant_i64(a->i); tcg_r2 = load_gpr(ctx, a->r); - do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf); + /* All ADDI conditions are 32-bit. */ + do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false); return nullify_end(ctx); } -static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1, - TCGv_reg in2, bool is_tsv, bool is_b, - bool is_tc, unsigned cf) +static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1, + TCGv_i64 in2, bool is_tsv, bool is_b, + bool is_tc, unsigned cf, bool d) { - TCGv_reg dest, sv, cb, cb_msb, zero, tmp; + TCGv_i64 dest, sv, cb, cb_msb, tmp; unsigned c = cf >> 1; DisasCond cond; - dest = tcg_temp_new(); - cb = tcg_temp_new(); - cb_msb = tcg_temp_new(); + dest = tcg_temp_new_i64(); + cb = tcg_temp_new_i64(); + cb_msb = tcg_temp_new_i64(); - zero = tcg_constant_reg(0); if (is_b) { /* DEST,C = IN1 + ~IN2 + C. */ - tcg_gen_not_reg(cb, in2); - tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero); - tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero); - tcg_gen_xor_reg(cb, cb, in1); - tcg_gen_xor_reg(cb, cb, dest); + tcg_gen_not_i64(cb, in2); + tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, + get_psw_carry(ctx, d), ctx->zero); + tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero); + tcg_gen_xor_i64(cb, cb, in1); + tcg_gen_xor_i64(cb, cb, dest); } else { - /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer - operations by seeding the high word with 1 and subtracting. */ - tcg_gen_movi_reg(cb_msb, 1); - tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero); - tcg_gen_eqv_reg(cb, in1, in2); - tcg_gen_xor_reg(cb, cb, dest); + /* + * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer + * operations by seeding the high word with 1 and subtracting. + */ + TCGv_i64 one = tcg_constant_i64(1); + tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero); + tcg_gen_eqv_i64(cb, in1, in2); + tcg_gen_xor_i64(cb, cb, dest); } /* Compute signed overflow if required. */ @@ -1230,15 +1147,15 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1, /* Compute the condition. We cannot use the special case for borrow. 
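
The "seed the high word with 1" comment above is the usual borrow-to-carry conversion: in1 + ~in2 + 1 carries out exactly when in1 >= in2 unsigned, and the same bit falls out of a double-word subtract whose high input word starts at 1. A host-side equivalent (uses the GCC/Clang __int128 extension, purely for illustration):

#include <stdint.h>

unsigned sub_carry(uint64_t in1, uint64_t in2)
{
    /* {1, in1} - {0, in2}: the leftover high word is PSW[C] */
    unsigned __int128 t = ((unsigned __int128)1 << 64) + in1 - in2;
    return (unsigned)(t >> 64);          /* 1 = no borrow (in1 >= in2) */
}
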
*/ if (!is_b) { - cond = do_sub_cond(cf, dest, in1, in2, sv); + cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv); } else { - cond = do_cond(cf, dest, cb_msb, sv); + cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv); } /* Emit any conditional trap before any writeback. */ if (is_tc) { - tmp = tcg_temp_new(); - tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); + tmp = tcg_temp_new_i64(); + tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(tcg_env, tmp); } @@ -1252,41 +1169,42 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1, ctx->null_cond = cond; } -static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a, +static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tsv, bool is_b, bool is_tc) { - TCGv_reg tcg_r1, tcg_r2; + TCGv_i64 tcg_r1, tcg_r2; if (a->cf) { nullify_over(ctx); } tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); - do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf); + do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d); return nullify_end(ctx); } static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv) { - TCGv_reg tcg_im, tcg_r2; + TCGv_i64 tcg_im, tcg_r2; if (a->cf) { nullify_over(ctx); } - tcg_im = load_const(ctx, a->i); + tcg_im = tcg_constant_i64(a->i); tcg_r2 = load_gpr(ctx, a->r); - do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf); + /* All SUBI conditions are 32-bit. */ + do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false); return nullify_end(ctx); } -static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1, - TCGv_reg in2, unsigned cf) +static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1, + TCGv_i64 in2, unsigned cf, bool d) { - TCGv_reg dest, sv; + TCGv_i64 dest, sv; DisasCond cond; - dest = tcg_temp_new(); - tcg_gen_sub_reg(dest, in1, in2); + dest = tcg_temp_new_i64(); + tcg_gen_sub_i64(dest, in1, in2); /* Compute signed overflow if required. */ sv = NULL; @@ -1295,10 +1213,10 @@ static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1, } /* Form the condition for the compare. */ - cond = do_sub_cond(cf, dest, in1, in2, sv); + cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv); /* Clear. */ - tcg_gen_movi_reg(dest, 0); + tcg_gen_movi_i64(dest, 0); save_gpr(ctx, rt, dest); /* Install the new nullification. */ @@ -1306,11 +1224,11 @@ static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1, ctx->null_cond = cond; } -static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1, - TCGv_reg in2, unsigned cf, - void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) +static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1, + TCGv_i64 in2, unsigned cf, bool d, + void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) { - TCGv_reg dest = dest_gpr(ctx, rt); + TCGv_i64 dest = dest_gpr(ctx, rt); /* Perform the operation, and writeback. */ fn(dest, in1, in2); @@ -1319,29 +1237,29 @@ static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1, /* Install the new nullification. 
*/ cond_free(&ctx->null_cond); if (cf) { - ctx->null_cond = do_log_cond(cf, dest); + ctx->null_cond = do_log_cond(ctx, cf, d, dest); } } -static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a, - void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) +static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a, + void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) { - TCGv_reg tcg_r1, tcg_r2; + TCGv_i64 tcg_r1, tcg_r2; if (a->cf) { nullify_over(ctx); } tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); - do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn); + do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn); return nullify_end(ctx); } -static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1, - TCGv_reg in2, unsigned cf, bool is_tc, - void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) +static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1, + TCGv_i64 in2, unsigned cf, bool d, bool is_tc, + void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) { - TCGv_reg dest; + TCGv_i64 dest; DisasCond cond; if (cf == 0) { @@ -1350,14 +1268,14 @@ static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1, save_gpr(ctx, rt, dest); cond_free(&ctx->null_cond); } else { - dest = tcg_temp_new(); + dest = tcg_temp_new_i64(); fn(dest, in1, in2); - cond = do_unit_cond(cf, dest, in1, in2); + cond = do_unit_cond(cf, d, dest, in1, in2); if (is_tc) { - TCGv_reg tmp = tcg_temp_new(); - tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(tcg_env, tmp); } save_gpr(ctx, rt, dest); @@ -1372,17 +1290,17 @@ static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1, from the top 2 bits of the base register. There are a few system instructions that have a 3-bit space specifier, for which SR0 is not special. To handle this, pass ~SP. */ -static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base) +static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base) { TCGv_ptr ptr; - TCGv_reg tmp; + TCGv_i64 tmp; TCGv_i64 spc; if (sp != 0) { if (sp < 0) { sp = ~sp; } - spc = get_temp_tl(ctx); + spc = tcg_temp_new_i64(); load_spr(ctx, spc, sp); return spc; } @@ -1391,12 +1309,13 @@ static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base) } ptr = tcg_temp_new_ptr(); - tmp = tcg_temp_new(); - spc = get_temp_tl(ctx); + tmp = tcg_temp_new_i64(); + spc = tcg_temp_new_i64(); - tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5); - tcg_gen_andi_reg(tmp, tmp, 030); - tcg_gen_trunc_reg_ptr(ptr, tmp); + /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */ + tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5); + tcg_gen_andi_i64(tmp, tmp, 030); + tcg_gen_trunc_i64_ptr(ptr, tmp); tcg_gen_add_ptr(ptr, ptr, tcg_env); tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4])); @@ -1405,38 +1324,35 @@ static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base) } #endif -static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs, - unsigned rb, unsigned rx, int scale, target_sreg disp, +static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs, + unsigned rb, unsigned rx, int scale, int64_t disp, unsigned sp, int modify, bool is_phys) { - TCGv_reg base = load_gpr(ctx, rb); - TCGv_reg ofs; + TCGv_i64 base = load_gpr(ctx, rb); + TCGv_i64 ofs; + TCGv_i64 addr; + + set_insn_breg(ctx, rb); /* Note that RX is mutually exclusive with DISP. 
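
The shift-and-mask pair in space_select() turns the top two bits of the offset directly into a byte offset into env->sr[4..7]: shifting right by width - 5 leaves those two bits at positions 4:3 — already multiplied by sizeof(uint64_t) — and the 030 mask drops everything below. With assumed example values:

uint64_t base = 0xC000000012345678ull;       /* W=1: top 2 bits = 3 */
unsigned ofs  = (base >> (64 - 5)) & 030;    /* 3 * 8 = 24          */
/* sr[4 + 3], i.e. SR7, is then loaded from offsetof(sr[4]) + 24 */
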
*/ if (rx) { - ofs = get_temp(ctx); - tcg_gen_shli_reg(ofs, cpu_gr[rx], scale); - tcg_gen_add_reg(ofs, ofs, base); + ofs = tcg_temp_new_i64(); + tcg_gen_shli_i64(ofs, cpu_gr[rx], scale); + tcg_gen_add_i64(ofs, ofs, base); } else if (disp || modify) { - ofs = get_temp(ctx); - tcg_gen_addi_reg(ofs, base, disp); + ofs = tcg_temp_new_i64(); + tcg_gen_addi_i64(ofs, base, disp); } else { ofs = base; } *pofs = ofs; -#ifdef CONFIG_USER_ONLY - *pgva = (modify <= 0 ? ofs : base); -#else - TCGv_tl addr = get_temp_tl(ctx); - tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base); - if (ctx->tb_flags & PSW_W) { - tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull); - } + *pgva = addr = tcg_temp_new_i64(); + tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx)); +#ifndef CONFIG_USER_ONLY if (!is_phys) { - tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base)); + tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base)); } - *pgva = addr; #endif } @@ -1446,29 +1362,29 @@ static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs, * = 0 for no base register update. */ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify, MemOp mop) { - TCGv_reg ofs; - TCGv_tl addr; + TCGv_i64 ofs; + TCGv_i64 addr; /* Caller uses nullify_over/nullify_end. */ assert(ctx->null_cond.c == TCG_COND_NEVER); form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, ctx->mmu_idx == MMU_PHYS_IDX); - tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); + tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); if (modify) { save_gpr(ctx, rb, ofs); } } static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify, MemOp mop) { - TCGv_reg ofs; - TCGv_tl addr; + TCGv_i64 ofs; + TCGv_i64 addr; /* Caller uses nullify_over/nullify_end. */ assert(ctx->null_cond.c == TCG_COND_NEVER); @@ -1482,11 +1398,11 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, } static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify, MemOp mop) { - TCGv_reg ofs; - TCGv_tl addr; + TCGv_i64 ofs; + TCGv_i64 addr; /* Caller uses nullify_over/nullify_end. */ assert(ctx->null_cond.c == TCG_COND_NEVER); @@ -1500,11 +1416,11 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, } static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify, MemOp mop) { - TCGv_reg ofs; - TCGv_tl addr; + TCGv_i64 ofs; + TCGv_i64 addr; /* Caller uses nullify_over/nullify_end. 
*/ assert(ctx->null_cond.c == TCG_COND_NEVER); @@ -1517,19 +1433,11 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, } } -#if TARGET_REGISTER_BITS == 64 -#define do_load_reg do_load_64 -#define do_store_reg do_store_64 -#else -#define do_load_reg do_load_32 -#define do_store_reg do_store_32 -#endif - static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify, MemOp mop) { - TCGv_reg dest; + TCGv_i64 dest; nullify_over(ctx); @@ -1538,16 +1446,16 @@ static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb, dest = dest_gpr(ctx, rt); } else { /* Make sure if RT == RB, we see the result of the load. */ - dest = get_temp(ctx); + dest = tcg_temp_new_i64(); } - do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop); + do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop); save_gpr(ctx, rt, dest); return nullify_end(ctx); } static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify) { TCGv_i32 tmp; @@ -1572,7 +1480,7 @@ static bool trans_fldw(DisasContext *ctx, arg_ldst *a) } static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify) { TCGv_i64 tmp; @@ -1597,16 +1505,16 @@ static bool trans_fldd(DisasContext *ctx, arg_ldst *a) } static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb, - target_sreg disp, unsigned sp, + int64_t disp, unsigned sp, int modify, MemOp mop) { nullify_over(ctx); - do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop); + do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop); return nullify_end(ctx); } static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify) { TCGv_i32 tmp; @@ -1626,7 +1534,7 @@ static bool trans_fstw(DisasContext *ctx, arg_ldst *a) } static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb, - unsigned rx, int scale, target_sreg disp, + unsigned rx, int scale, int64_t disp, unsigned sp, int modify) { TCGv_i64 tmp; @@ -1739,12 +1647,12 @@ static bool do_fop_dedd(DisasContext *ctx, unsigned rt, /* Emit an unconditional branch to a direct target, which may or may not have already had nullification handled. */ -static bool do_dbranch(DisasContext *ctx, target_ureg dest, +static bool do_dbranch(DisasContext *ctx, uint64_t dest, unsigned link, bool is_n) { if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) { if (link != 0) { - copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); } ctx->iaoq_n = dest; if (is_n) { @@ -1754,7 +1662,7 @@ static bool do_dbranch(DisasContext *ctx, target_ureg dest, nullify_over(ctx); if (link != 0) { - copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); } if (is_n && use_nullify_skip(ctx)) { @@ -1776,10 +1684,10 @@ static bool do_dbranch(DisasContext *ctx, target_ureg dest, /* Emit a conditional branch to a direct target. If the branch itself is nullified, we should have already used nullify_over. 
*/ -static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n, +static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n, DisasCond *cond) { - target_ureg dest = iaoq_dest(ctx, disp); + uint64_t dest = iaoq_dest(ctx, disp); TCGLabel *taken = NULL; TCGCond c = cond->c; bool n; @@ -1795,7 +1703,7 @@ static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n, } taken = gen_new_label(); - tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken); + tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken); cond_free(cond); /* Not taken: Condition not satisfied; nullify on backward branches. */ @@ -1812,7 +1720,7 @@ static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n, if (ctx->iaoq_n == -1) { /* The temporary iaoq_n_var died at the branch above. Regenerate it here instead of saving it. */ - tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4); + tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4); } gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n); } @@ -1842,24 +1750,25 @@ static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n, /* Emit an unconditional branch to an indirect target. This handles nullification of the branch itself. */ -static bool do_ibranch(DisasContext *ctx, TCGv_reg dest, +static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest, unsigned link, bool is_n) { - TCGv_reg a0, a1, next, tmp; + TCGv_i64 a0, a1, next, tmp; TCGCond c; assert(ctx->null_lab == NULL); if (ctx->null_cond.c == TCG_COND_NEVER) { if (link != 0) { - copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); } - next = get_temp(ctx); - tcg_gen_mov_reg(next, dest); + next = tcg_temp_new_i64(); + tcg_gen_mov_i64(next, dest); if (is_n) { if (use_nullify_skip(ctx)) { - tcg_gen_mov_reg(cpu_iaoq_f, next); - tcg_gen_addi_reg(cpu_iaoq_b, next, 4); + copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next); + tcg_gen_addi_i64(next, next, 4); + copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next); nullify_set(ctx, 0); ctx->base.is_jmp = DISAS_IAQ_N_UPDATED; return true; @@ -1881,12 +1790,14 @@ static bool do_ibranch(DisasContext *ctx, TCGv_reg dest, /* We do have to handle the non-local temporary, DEST, before branching. Since IOAQ_F is not really live at this point, we can simply store DEST optimistically. Similarly with IAOQ_B. 
*/ - tcg_gen_mov_reg(cpu_iaoq_f, dest); - tcg_gen_addi_reg(cpu_iaoq_b, dest, 4); + copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest); + next = tcg_temp_new_i64(); + tcg_gen_addi_i64(next, dest, 4); + copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next); nullify_over(ctx); if (link != 0) { - tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n); + copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); } tcg_gen_lookup_and_goto_ptr(); return nullify_end(ctx); @@ -1895,23 +1806,23 @@ static bool do_ibranch(DisasContext *ctx, TCGv_reg dest, a0 = ctx->null_cond.a0; a1 = ctx->null_cond.a1; - tmp = tcg_temp_new(); - next = get_temp(ctx); + tmp = tcg_temp_new_i64(); + next = tcg_temp_new_i64(); - copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var); - tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest); + copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var); + tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest); ctx->iaoq_n = -1; ctx->iaoq_n_var = next; if (link != 0) { - tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp); + tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp); } if (is_n) { /* The branch nullifies the next insn, which means the state of N after the branch is the inverse of the state of N that applied to the branch. */ - tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1); + tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1); cond_free(&ctx->null_cond); ctx->null_cond = cond_make_n(); ctx->psw_n_nonzero = true; @@ -1929,23 +1840,23 @@ static bool do_ibranch(DisasContext *ctx, TCGv_reg dest, * IAOQ_Next{30..31} ← IAOQ_Front{30..31}; * which keeps the privilege level from being increased. */ -static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset) +static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset) { - TCGv_reg dest; + TCGv_i64 dest; switch (ctx->privilege) { case 0: /* Privilege 0 is maximum and is allowed to decrease. */ return offset; case 3: /* Privilege 3 is minimum and is never allowed to increase. */ - dest = get_temp(ctx); - tcg_gen_ori_reg(dest, offset, 3); + dest = tcg_temp_new_i64(); + tcg_gen_ori_i64(dest, offset, 3); break; default: - dest = get_temp(ctx); - tcg_gen_andi_reg(dest, offset, -4); - tcg_gen_ori_reg(dest, dest, ctx->privilege); - tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset); + dest = tcg_temp_new_i64(); + tcg_gen_andi_i64(dest, offset, -4); + tcg_gen_ori_i64(dest, dest, ctx->privilege); + tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset); break; } return dest; @@ -1961,6 +1872,8 @@ static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset) aforementioned BE. */ static void do_page_zero(DisasContext *ctx) { + TCGv_i64 tmp; + /* If by some means we get here with PSW[N]=1, that implies that the B,GATE instruction would be skipped, and we'd fault on the next insn within the privileged page. 
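
For the middle privilege levels, do_ibranch_priv() encodes the rule quoted above as an unsigned max: the target's low two bits (its privilege field) are overwritten with the current level, and the numerically larger address wins, so the privilege number can only grow — that is, privilege can only decrease. As plain C (sketch of the default case only; levels 0 and 3 are special-cased above):

#include <stdint.h>

uint64_t ibranch_priv(uint64_t offset, unsigned cur_priv)
{
    uint64_t dest = (offset & ~3ull) | cur_priv;
    return dest > offset ? dest : offset;    /* the TCG_COND_GTU movcond */
}
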
*/ @@ -1968,7 +1881,7 @@ static void do_page_zero(DisasContext *ctx) case TCG_COND_NEVER: break; case TCG_COND_ALWAYS: - tcg_gen_movi_reg(cpu_psw_n, 0); + tcg_gen_movi_i64(cpu_psw_n, 0); goto do_sigill; default: /* Since this is always the first (and only) insn within the @@ -1996,9 +1909,12 @@ static void do_page_zero(DisasContext *ctx) break; case 0xe0: /* SET_THREAD_POINTER */ - tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27])); - tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3); - tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4); + tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27])); + tmp = tcg_temp_new_i64(); + tcg_gen_ori_i64(tmp, cpu_gr[31], 3); + copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp); + tcg_gen_addi_i64(tmp, tmp, 4); + copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); ctx->base.is_jmp = DISAS_IAQ_N_UPDATED; break; @@ -2039,8 +1955,8 @@ static bool trans_sync(DisasContext *ctx, arg_sync *a) static bool trans_mfia(DisasContext *ctx, arg_mfia *a) { unsigned rt = a->t; - TCGv_reg tmp = dest_gpr(ctx, rt); - tcg_gen_movi_reg(tmp, ctx->iaoq_f); + TCGv_i64 tmp = dest_gpr(ctx, rt); + tcg_gen_movi_i64(tmp, ctx->iaoq_f); save_gpr(ctx, rt, tmp); cond_free(&ctx->null_cond); @@ -2052,13 +1968,11 @@ static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a) unsigned rt = a->t; unsigned rs = a->sp; TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_reg t1 = tcg_temp_new(); load_spr(ctx, t0, rs); tcg_gen_shri_i64(t0, t0, 32); - tcg_gen_trunc_i64_reg(t1, t0); - save_gpr(ctx, rt, t1); + save_gpr(ctx, rt, t0); cond_free(&ctx->null_cond); return true; @@ -2068,19 +1982,17 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) { unsigned rt = a->t; unsigned ctl = a->r; - TCGv_reg tmp; + TCGv_i64 tmp; switch (ctl) { case CR_SAR: -#ifdef TARGET_HPPA64 if (a->e == 0) { /* MFSAR without ,W masks low 5 bits. */ tmp = dest_gpr(ctx, rt); - tcg_gen_andi_reg(tmp, cpu_sar, 31); + tcg_gen_andi_i64(tmp, cpu_sar, 31); save_gpr(ctx, rt, tmp); goto done; } -#endif save_gpr(ctx, rt, cpu_sar); goto done; case CR_IT: /* Interval Timer */ @@ -2104,8 +2016,8 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) break; } - tmp = get_temp(ctx); - tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); + tmp = tcg_temp_new_i64(); + tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); save_gpr(ctx, rt, tmp); done: @@ -2117,22 +2029,21 @@ static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) { unsigned rr = a->r; unsigned rs = a->sp; - TCGv_i64 t64; + TCGv_i64 tmp; if (rs >= 5) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); } nullify_over(ctx); - t64 = tcg_temp_new_i64(); - tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr)); - tcg_gen_shli_i64(t64, t64, 32); + tmp = tcg_temp_new_i64(); + tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32); if (rs >= 4) { - tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs])); + tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs])); ctx->tb_flags &= ~TB_FLAG_SR_SAME; } else { - tcg_gen_mov_i64(cpu_sr[rs], t64); + tcg_gen_mov_i64(cpu_sr[rs], tmp); } return nullify_end(ctx); @@ -2141,13 +2052,13 @@ static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) { unsigned ctl = a->t; - TCGv_reg reg; - TCGv_reg tmp; + TCGv_i64 reg; + TCGv_i64 tmp; if (ctl == CR_SAR) { reg = load_gpr(ctx, a->r); - tmp = tcg_temp_new(); - tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1); + tmp = tcg_temp_new_i64(); + tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 
63 : 31); save_or_nullify(ctx, cpu_sar, tmp); cond_free(&ctx->null_cond); @@ -2159,7 +2070,13 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) #ifndef CONFIG_USER_ONLY nullify_over(ctx); - reg = load_gpr(ctx, a->r); + + if (ctx->is_pa20) { + reg = load_gpr(ctx, a->r); + } else { + reg = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r)); + } switch (ctl) { case CR_IT: @@ -2177,11 +2094,11 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) case CR_IIAOQ: /* FIXME: Respect PSW_Q bit */ /* The write advances the queue and stores to the back element. */ - tmp = get_temp(ctx); - tcg_gen_ld_reg(tmp, tcg_env, + tmp = tcg_temp_new_i64(); + tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); - tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); - tcg_gen_st_reg(reg, tcg_env, + tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); + tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); break; @@ -2189,14 +2106,14 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) case CR_PID2: case CR_PID3: case CR_PID4: - tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); + tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); #ifndef CONFIG_USER_ONLY gen_helper_change_prot_id(tcg_env); #endif break; default: - tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); + tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); break; } return nullify_end(ctx); @@ -2205,10 +2122,10 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) { - TCGv_reg tmp = tcg_temp_new(); + TCGv_i64 tmp = tcg_temp_new_i64(); - tcg_gen_not_reg(tmp, load_gpr(ctx, a->r)); - tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); + tcg_gen_not_i64(tmp, load_gpr(ctx, a->r)); + tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31); save_or_nullify(ctx, cpu_sar, tmp); cond_free(&ctx->null_cond); @@ -2217,17 +2134,14 @@ static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) { - TCGv_reg dest = dest_gpr(ctx, a->t); + TCGv_i64 dest = dest_gpr(ctx, a->t); #ifdef CONFIG_USER_ONLY /* We don't implement space registers in user mode. 
*/ - tcg_gen_movi_reg(dest, 0); + tcg_gen_movi_i64(dest, 0); #else - TCGv_i64 t0 = tcg_temp_new_i64(); - - tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b))); - tcg_gen_shri_i64(t0, t0, 32); - tcg_gen_trunc_i64_reg(dest, t0); + tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b))); + tcg_gen_shri_i64(dest, dest, 32); #endif save_gpr(ctx, a->t, dest); @@ -2239,13 +2153,13 @@ static bool trans_rsm(DisasContext *ctx, arg_rsm *a) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - TCGv_reg tmp; + TCGv_i64 tmp; nullify_over(ctx); - tmp = get_temp(ctx); - tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw)); - tcg_gen_andi_reg(tmp, tmp, ~a->i); + tmp = tcg_temp_new_i64(); + tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw)); + tcg_gen_andi_i64(tmp, tmp, ~a->i); gen_helper_swap_system_mask(tmp, tcg_env, tmp); save_gpr(ctx, a->t, tmp); @@ -2259,13 +2173,13 @@ static bool trans_ssm(DisasContext *ctx, arg_ssm *a) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - TCGv_reg tmp; + TCGv_i64 tmp; nullify_over(ctx); - tmp = get_temp(ctx); - tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw)); - tcg_gen_ori_reg(tmp, tmp, a->i); + tmp = tcg_temp_new_i64(); + tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw)); + tcg_gen_ori_i64(tmp, tmp, a->i); gen_helper_swap_system_mask(tmp, tcg_env, tmp); save_gpr(ctx, a->t, tmp); @@ -2279,11 +2193,11 @@ static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - TCGv_reg tmp, reg; + TCGv_i64 tmp, reg; nullify_over(ctx); reg = load_gpr(ctx, a->r); - tmp = get_temp(ctx); + tmp = tcg_temp_new_i64(); gen_helper_swap_system_mask(tmp, tcg_env, reg); /* Exit the TB to recognize new interrupts. */ @@ -2356,12 +2270,12 @@ static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a) static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) { if (a->m) { - TCGv_reg dest = dest_gpr(ctx, a->b); - TCGv_reg src1 = load_gpr(ctx, a->b); - TCGv_reg src2 = load_gpr(ctx, a->x); + TCGv_i64 dest = dest_gpr(ctx, a->b); + TCGv_i64 src1 = load_gpr(ctx, a->b); + TCGv_i64 src2 = load_gpr(ctx, a->x); /* The only thing we need to do is the base register modification. */ - tcg_gen_add_reg(dest, src1, src2); + tcg_gen_add_i64(dest, src1, src2); save_gpr(ctx, a->b, dest); } cond_free(&ctx->null_cond); @@ -2370,9 +2284,9 @@ static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) static bool trans_probe(DisasContext *ctx, arg_probe *a) { - TCGv_reg dest, ofs; + TCGv_i64 dest, ofs; TCGv_i32 level, want; - TCGv_tl addr; + TCGv_i64 addr; nullify_over(ctx); @@ -2383,7 +2297,7 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a) level = tcg_constant_i32(a->ri); } else { level = tcg_temp_new_i32(); - tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri)); + tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri)); tcg_gen_andi_i32(level, level, 3); } want = tcg_constant_i32(a->write ? 
PAGE_WRITE : PAGE_READ); @@ -2396,19 +2310,22 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a) static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a) { + if (ctx->is_pa20) { + return false; + } CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - TCGv_tl addr; - TCGv_reg ofs, reg; + TCGv_i64 addr; + TCGv_i64 ofs, reg; nullify_over(ctx); form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); reg = load_gpr(ctx, a->r); if (a->addr) { - gen_helper_itlba(tcg_env, addr, reg); + gen_helper_itlba_pa11(tcg_env, addr, reg); } else { - gen_helper_itlbp(tcg_env, addr, reg); + gen_helper_itlbp_pa11(tcg_env, addr, reg); } /* Exit TB for TLB change if mmu is enabled. */ @@ -2419,25 +2336,63 @@ static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a) #endif } -static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a) +static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - TCGv_tl addr; - TCGv_reg ofs; + TCGv_i64 addr; + TCGv_i64 ofs; nullify_over(ctx); form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); - if (a->m) { - save_gpr(ctx, a->b, ofs); + + /* + * Page align now, rather than later, so that we can add in the + * page_size field from pa2.0 from the low 4 bits of GR[b]. + */ + tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK); + if (ctx->is_pa20) { + tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4); } - if (a->local) { - gen_helper_ptlbe(tcg_env); + + if (local) { + gen_helper_ptlb_l(tcg_env, addr); } else { gen_helper_ptlb(tcg_env, addr); } + if (a->m) { + save_gpr(ctx, a->b, ofs); + } + + /* Exit TB for TLB change if mmu is enabled. */ + if (ctx->tb_flags & PSW_C) { + ctx->base.is_jmp = DISAS_IAQ_N_STALE; + } + return nullify_end(ctx); +#endif +} + +static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a) +{ + return do_pxtlb(ctx, a, false); +} + +static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a) +{ + return ctx->is_pa20 && do_pxtlb(ctx, a, true); +} + +static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a) +{ + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); +#ifndef CONFIG_USER_ONLY + nullify_over(ctx); + + trans_nop_addrx(ctx, a); + gen_helper_ptlbe(tcg_env); + /* Exit TB for TLB change if mmu is enabled. */ if (ctx->tb_flags & PSW_C) { ctx->base.is_jmp = DISAS_IAQ_N_STALE; @@ -2454,10 +2409,13 @@ static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a) */ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) { + if (ctx->is_pa20) { + return false; + } CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - TCGv_tl addr, atl, stl; - TCGv_reg reg; + TCGv_i64 addr, atl, stl; + TCGv_i64 reg; nullify_over(ctx); @@ -2465,13 +2423,11 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) * FIXME: * if (not (pcxl or pcxl2)) * return gen_illegal(ctx); - * - * Note for future: these are 32-bit systems; no hppa64. */ - atl = tcg_temp_new_tl(); - stl = tcg_temp_new_tl(); - addr = tcg_temp_new_tl(); + atl = tcg_temp_new_i64(); + stl = tcg_temp_new_i64(); + addr = tcg_temp_new_i64(); tcg_gen_ld32u_i64(stl, tcg_env, a->data ? offsetof(CPUHPPAState, cr[CR_ISR]) @@ -2480,13 +2436,13 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) a->data ? 
offsetof(CPUHPPAState, cr[CR_IOR]) : offsetof(CPUHPPAState, cr[CR_IIAOQ])); tcg_gen_shli_i64(stl, stl, 32); - tcg_gen_or_tl(addr, atl, stl); + tcg_gen_or_i64(addr, atl, stl); reg = load_gpr(ctx, a->r); if (a->addr) { - gen_helper_itlba(tcg_env, addr, reg); + gen_helper_itlba_pa11(tcg_env, addr, reg); } else { - gen_helper_itlbp(tcg_env, addr, reg); + gen_helper_itlbp_pa11(tcg_env, addr, reg); } /* Exit TB for TLB change if mmu is enabled. */ @@ -2497,18 +2453,44 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) #endif } +static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a) +{ + if (!ctx->is_pa20) { + return false; + } + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); +#ifndef CONFIG_USER_ONLY + nullify_over(ctx); + { + TCGv_i64 src1 = load_gpr(ctx, a->r1); + TCGv_i64 src2 = load_gpr(ctx, a->r2); + + if (a->data) { + gen_helper_idtlbt_pa20(tcg_env, src1, src2); + } else { + gen_helper_iitlbt_pa20(tcg_env, src1, src2); + } + } + /* Exit TB for TLB change if mmu is enabled. */ + if (ctx->tb_flags & PSW_C) { + ctx->base.is_jmp = DISAS_IAQ_N_STALE; + } + return nullify_end(ctx); +#endif +} + static bool trans_lpa(DisasContext *ctx, arg_ldst *a) { CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); #ifndef CONFIG_USER_ONLY - TCGv_tl vaddr; - TCGv_reg ofs, paddr; + TCGv_i64 vaddr; + TCGv_i64 ofs, paddr; nullify_over(ctx); form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); - paddr = tcg_temp_new(); + paddr = tcg_temp_new_i64(); gen_helper_lpa(paddr, tcg_env, vaddr); /* Note that physical address result overrides base modification. */ @@ -2529,78 +2511,78 @@ static bool trans_lci(DisasContext *ctx, arg_lci *a) physical address. Two addresses with the same CI have a coherent view of the cache. Our implementation is to return 0 for all, since the entire address space is coherent. 
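   (A trivial consequence: LCI on any two mappings of the same
   physical line yields equal indexes, which is all the architecture
   promises.)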
*/ - save_gpr(ctx, a->t, tcg_constant_reg(0)); + save_gpr(ctx, a->t, ctx->zero); cond_free(&ctx->null_cond); return true; } -static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a) +static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a) { return do_add_reg(ctx, a, false, false, false, false); } -static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a) +static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a) { return do_add_reg(ctx, a, true, false, false, false); } -static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a) +static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a) { return do_add_reg(ctx, a, false, true, false, false); } -static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a) +static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a) { return do_add_reg(ctx, a, false, false, false, true); } -static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a) +static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a) { return do_add_reg(ctx, a, false, true, false, true); } -static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a) { return do_sub_reg(ctx, a, false, false, false); } -static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a) { return do_sub_reg(ctx, a, true, false, false); } -static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a) { return do_sub_reg(ctx, a, false, false, true); } -static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a) { return do_sub_reg(ctx, a, true, false, true); } -static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a) { return do_sub_reg(ctx, a, false, true, false); } -static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a) { return do_sub_reg(ctx, a, true, true, false); } -static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a) { - return do_log_reg(ctx, a, tcg_gen_andc_reg); + return do_log_reg(ctx, a, tcg_gen_andc_i64); } -static bool trans_and(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a) { - return do_log_reg(ctx, a, tcg_gen_and_reg); + return do_log_reg(ctx, a, tcg_gen_and_i64); } -static bool trans_or(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a) { if (a->cf == 0) { unsigned r2 = a->r2; @@ -2613,8 +2595,8 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf *a) } if (r2 == 0) { /* COPY */ if (r1 == 0) { - TCGv_reg dest = dest_gpr(ctx, rt); - tcg_gen_movi_reg(dest, 0); + TCGv_i64 dest = dest_gpr(ctx, rt); + tcg_gen_movi_i64(dest, 0); save_gpr(ctx, rt, dest); } else { save_gpr(ctx, rt, cpu_gr[r1]); @@ -2635,8 +2617,8 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf *a) nullify_over(ctx); /* Advance the instruction queue. */ - copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); - copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); + copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); nullify_set(ctx, 0); /* Tell the qemu main loop to halt until this cpu has work. 
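   (This arm handles the recognised idle-loop hint: rather than let
   the guest spin on the no-op OR, the vcpu is parked until the next
   interrupt arrives.)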
*/ @@ -2649,142 +2631,146 @@ static bool trans_or(DisasContext *ctx, arg_rrr_cf *a) } #endif } - return do_log_reg(ctx, a, tcg_gen_or_reg); + return do_log_reg(ctx, a, tcg_gen_or_i64); } -static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a) { - return do_log_reg(ctx, a, tcg_gen_xor_reg); + return do_log_reg(ctx, a, tcg_gen_xor_i64); } -static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a) { - TCGv_reg tcg_r1, tcg_r2; + TCGv_i64 tcg_r1, tcg_r2; if (a->cf) { nullify_over(ctx); } tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); - do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf); + do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d); return nullify_end(ctx); } -static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a) { - TCGv_reg tcg_r1, tcg_r2; + TCGv_i64 tcg_r1, tcg_r2; if (a->cf) { nullify_over(ctx); } tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); - do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg); + do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64); return nullify_end(ctx); } -static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc) +static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc) { - TCGv_reg tcg_r1, tcg_r2, tmp; + TCGv_i64 tcg_r1, tcg_r2, tmp; if (a->cf) { nullify_over(ctx); } tcg_r1 = load_gpr(ctx, a->r1); tcg_r2 = load_gpr(ctx, a->r2); - tmp = get_temp(ctx); - tcg_gen_not_reg(tmp, tcg_r2); - do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg); + tmp = tcg_temp_new_i64(); + tcg_gen_not_i64(tmp, tcg_r2); + do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64); return nullify_end(ctx); } -static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a) { return do_uaddcm(ctx, a, false); } -static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a) +static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a) { return do_uaddcm(ctx, a, true); } -static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i) +static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i) { - TCGv_reg tmp; + TCGv_i64 tmp; nullify_over(ctx); - tmp = get_temp(ctx); - tcg_gen_shri_reg(tmp, cpu_psw_cb, 3); + tmp = tcg_temp_new_i64(); + tcg_gen_shri_i64(tmp, cpu_psw_cb, 3); if (!is_i) { - tcg_gen_not_reg(tmp, tmp); + tcg_gen_not_i64(tmp, tmp); } - tcg_gen_andi_reg(tmp, tmp, 0x11111111); - tcg_gen_muli_reg(tmp, tmp, 6); - do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false, - is_i ? tcg_gen_add_reg : tcg_gen_sub_reg); + tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull); + tcg_gen_muli_i64(tmp, tmp, 6); + do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false, + is_i ? 
tcg_gen_add_i64 : tcg_gen_sub_i64); return nullify_end(ctx); } -static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a) +static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a) { return do_dcor(ctx, a, false); } -static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a) +static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a) { return do_dcor(ctx, a, true); } static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) { - TCGv_reg dest, add1, add2, addc, zero, in1, in2; + TCGv_i64 dest, add1, add2, addc, in1, in2; + TCGv_i64 cout; nullify_over(ctx); in1 = load_gpr(ctx, a->r1); in2 = load_gpr(ctx, a->r2); - add1 = tcg_temp_new(); - add2 = tcg_temp_new(); - addc = tcg_temp_new(); - dest = tcg_temp_new(); - zero = tcg_constant_reg(0); + add1 = tcg_temp_new_i64(); + add2 = tcg_temp_new_i64(); + addc = tcg_temp_new_i64(); + dest = tcg_temp_new_i64(); /* Form R1 << 1 | PSW[CB]{8}. */ - tcg_gen_add_reg(add1, in1, in1); - tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb); - - /* Add or subtract R2, depending on PSW[V]. Proper computation of - carry{8} requires that we subtract via + ~R2 + 1, as described in - the manual. By extracting and masking V, we can produce the - proper inputs to the addition without movcond. */ - tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1); - tcg_gen_xor_reg(add2, in2, addc); - tcg_gen_andi_reg(addc, addc, 1); - /* ??? This is only correct for 32-bit. */ - tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero); - tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero); + tcg_gen_add_i64(add1, in1, in1); + tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false)); + + /* + * Add or subtract R2, depending on PSW[V]. Proper computation of + * carry requires that we subtract via + ~R2 + 1, as described in + * the manual. By extracting and masking V, we can produce the + * proper inputs to the addition without movcond. + */ + tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1); + tcg_gen_xor_i64(add2, in2, addc); + tcg_gen_andi_i64(addc, addc, 1); + + tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero); + tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, + addc, ctx->zero); /* Write back the result register. */ save_gpr(ctx, a->t, dest); /* Write back PSW[CB]. */ - tcg_gen_xor_reg(cpu_psw_cb, add1, add2); - tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest); + tcg_gen_xor_i64(cpu_psw_cb, add1, add2); + tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest); /* Write back PSW[V] for the division step. */ - tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb); - tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2); + cout = get_psw_carry(ctx, false); + tcg_gen_neg_i64(cpu_psw_v, cout); + tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2); /* Install the new nullification. */ if (a->cf) { - TCGv_reg sv = NULL; + TCGv_i64 sv = NULL; if (cond_need_sv(a->cf >> 1)) { /* ??? The lshift is supposed to contribute to overflow. 
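   For reference, the PSW[V] negation trick used above, in scalar
   terms (v being the value extracted from PSW[V]):
       v    = PSW[V] ? -1 : 0     -- sextract of bit 31
       add2 = in2 ^ v             -- R2 or ~R2
       addc = v & 1               -- the +1 completing -R2
   so add1 + add2 + addc adds or subtracts R2 without a movcond.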
*/ sv = do_add_sv(ctx, dest, add1, add2); } - ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv); + ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv); } return nullify_end(ctx); @@ -2820,53 +2806,270 @@ static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a) return do_sub_imm(ctx, a, true); } -static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a) +static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a) { - TCGv_reg tcg_im, tcg_r2; + TCGv_i64 tcg_im, tcg_r2; if (a->cf) { nullify_over(ctx); } - tcg_im = load_const(ctx, a->i); + tcg_im = tcg_constant_i64(a->i); tcg_r2 = load_gpr(ctx, a->r); - do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf); + do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d); + + return nullify_end(ctx); +} + +static bool do_multimedia(DisasContext *ctx, arg_rrr *a, + void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 r1, r2, dest; + + if (!ctx->is_pa20) { + return false; + } + + nullify_over(ctx); + + r1 = load_gpr(ctx, a->r1); + r2 = load_gpr(ctx, a->r2); + dest = dest_gpr(ctx, a->t); + + fn(dest, r1, r2); + save_gpr(ctx, a->t, dest); return nullify_end(ctx); } +static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a, + void (*fn)(TCGv_i64, TCGv_i64, int64_t)) +{ + TCGv_i64 r, dest; + + if (!ctx->is_pa20) { + return false; + } + + nullify_over(ctx); + + r = load_gpr(ctx, a->r); + dest = dest_gpr(ctx, a->t); + + fn(dest, r, a->i); + save_gpr(ctx, a->t, dest); + + return nullify_end(ctx); +} + +static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a, + void (*fn)(TCGv_i64, TCGv_i64, + TCGv_i64, TCGv_i32)) +{ + TCGv_i64 r1, r2, dest; + + if (!ctx->is_pa20) { + return false; + } + + nullify_over(ctx); + + r1 = load_gpr(ctx, a->r1); + r2 = load_gpr(ctx, a->r2); + dest = dest_gpr(ctx, a->t); + + fn(dest, r1, r2, tcg_constant_i32(a->sh)); + save_gpr(ctx, a->t, dest); + + return nullify_end(ctx); +} + +static bool trans_hadd(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, tcg_gen_vec_add16_i64); +} + +static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_helper_hadd_ss); +} + +static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_helper_hadd_us); +} + +static bool trans_havg(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_helper_havg); +} + +static bool trans_hshl(DisasContext *ctx, arg_rri *a) +{ + return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64); +} + +static bool trans_hshr_s(DisasContext *ctx, arg_rri *a) +{ + return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64); +} + +static bool trans_hshr_u(DisasContext *ctx, arg_rri *a) +{ + return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64); +} + +static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a) +{ + return do_multimedia_shadd(ctx, a, gen_helper_hshladd); +} + +static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a) +{ + return do_multimedia_shadd(ctx, a, gen_helper_hshradd); +} + +static bool trans_hsub(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64); +} + +static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_helper_hsub_ss); +} + +static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_helper_hsub_us); +} + +static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2) +{ + uint64_t mask = 0xffff0000ffff0000ull; + TCGv_i64 tmp = tcg_temp_new_i64(); + + tcg_gen_andi_i64(tmp, r2, mask); + tcg_gen_andi_i64(dst, r1, mask); + 
tcg_gen_shri_i64(tmp, tmp, 16); + tcg_gen_or_i64(dst, dst, tmp); +} + +static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_mixh_l); +} + +static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2) +{ + uint64_t mask = 0x0000ffff0000ffffull; + TCGv_i64 tmp = tcg_temp_new_i64(); + + tcg_gen_andi_i64(tmp, r1, mask); + tcg_gen_andi_i64(dst, r2, mask); + tcg_gen_shli_i64(tmp, tmp, 16); + tcg_gen_or_i64(dst, dst, tmp); +} + +static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_mixh_r); +} + +static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + + tcg_gen_shri_i64(tmp, r2, 32); + tcg_gen_deposit_i64(dst, r1, tmp, 0, 32); +} + +static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_mixw_l); +} + +static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2) +{ + tcg_gen_deposit_i64(dst, r2, r1, 32, 32); +} + +static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a) +{ + return do_multimedia(ctx, a, gen_mixw_r); +} + +static bool trans_permh(DisasContext *ctx, arg_permh *a) +{ + TCGv_i64 r, t0, t1, t2, t3; + + if (!ctx->is_pa20) { + return false; + } + + nullify_over(ctx); + + r = load_gpr(ctx, a->r1); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + t3 = tcg_temp_new_i64(); + + tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16); + tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16); + tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16); + tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16); + + tcg_gen_deposit_i64(t0, t1, t0, 16, 48); + tcg_gen_deposit_i64(t2, t3, t2, 16, 48); + tcg_gen_deposit_i64(t0, t2, t0, 32, 32); + + save_gpr(ctx, a->t, t0); + return nullify_end(ctx); +} + static bool trans_ld(DisasContext *ctx, arg_ldst *a) { - if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) { + if (ctx->is_pa20) { + /* + * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches. + * Any base modification still occurs. + */ + if (a->t == 0) { + return trans_nop_addrx(ctx, a); + } + } else if (a->size > MO_32) { return gen_illegal(ctx); - } else { - return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0, - a->disp, a->sp, a->m, a->size | MO_TE); } + return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0, + a->disp, a->sp, a->m, a->size | MO_TE); } static bool trans_st(DisasContext *ctx, arg_ldst *a) { assert(a->x == 0 && a->scale == 0); - if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) { + if (!ctx->is_pa20 && a->size > MO_32) { return gen_illegal(ctx); - } else { - return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE); } + return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE); } static bool trans_ldc(DisasContext *ctx, arg_ldst *a) { MemOp mop = MO_TE | MO_ALIGN | a->size; - TCGv_reg zero, dest, ofs; - TCGv_tl addr; + TCGv_i64 dest, ofs; + TCGv_i64 addr; + + if (!ctx->is_pa20 && a->size > MO_32) { + return gen_illegal(ctx); + } nullify_over(ctx); if (a->m) { /* Base register modification. Make sure if RT == RB, we see the result of the load. 
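   (e.g. a base-modifying LDCW with t == b: the exchanged old value
   must win over the base update, so it is staged in a temporary and
   written back only after the base register.)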
*/ - dest = get_temp(ctx); + dest = tcg_temp_new_i64(); } else { dest = dest_gpr(ctx, a->t); } @@ -2884,8 +3087,7 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a) */ gen_helper_ldc_check(addr); - zero = tcg_constant_reg(0); - tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop); + tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop); if (a->m) { save_gpr(ctx, a->b, ofs); @@ -2897,8 +3099,8 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a) static bool trans_stby(DisasContext *ctx, arg_stby *a) { - TCGv_reg ofs, val; - TCGv_tl addr; + TCGv_i64 ofs, val; + TCGv_i64 addr; nullify_over(ctx); @@ -2919,7 +3121,41 @@ static bool trans_stby(DisasContext *ctx, arg_stby *a) } } if (a->m) { - tcg_gen_andi_reg(ofs, ofs, ~3); + tcg_gen_andi_i64(ofs, ofs, ~3); + save_gpr(ctx, a->b, ofs); + } + + return nullify_end(ctx); +} + +static bool trans_stdby(DisasContext *ctx, arg_stby *a) +{ + TCGv_i64 ofs, val; + TCGv_i64 addr; + + if (!ctx->is_pa20) { + return false; + } + nullify_over(ctx); + + form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, + ctx->mmu_idx == MMU_PHYS_IDX); + val = load_gpr(ctx, a->r); + if (a->a) { + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + gen_helper_stdby_e_parallel(tcg_env, addr, val); + } else { + gen_helper_stdby_e(tcg_env, addr, val); + } + } else { + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + gen_helper_stdby_b_parallel(tcg_env, addr, val); + } else { + gen_helper_stdby_b(tcg_env, addr, val); + } + } + if (a->m) { + tcg_gen_andi_i64(ofs, ofs, ~7); save_gpr(ctx, a->b, ofs); } @@ -2950,9 +3186,9 @@ static bool trans_sta(DisasContext *ctx, arg_ldst *a) static bool trans_ldil(DisasContext *ctx, arg_ldil *a) { - TCGv_reg tcg_rt = dest_gpr(ctx, a->t); + TCGv_i64 tcg_rt = dest_gpr(ctx, a->t); - tcg_gen_movi_reg(tcg_rt, a->i); + tcg_gen_movi_i64(tcg_rt, a->i); save_gpr(ctx, a->t, tcg_rt); cond_free(&ctx->null_cond); return true; @@ -2960,10 +3196,10 @@ static bool trans_ldil(DisasContext *ctx, arg_ldil *a) static bool trans_addil(DisasContext *ctx, arg_addil *a) { - TCGv_reg tcg_rt = load_gpr(ctx, a->r); - TCGv_reg tcg_r1 = dest_gpr(ctx, 1); + TCGv_i64 tcg_rt = load_gpr(ctx, a->r); + TCGv_i64 tcg_r1 = dest_gpr(ctx, 1); - tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i); + tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i); save_gpr(ctx, 1, tcg_r1); cond_free(&ctx->null_cond); return true; @@ -2971,75 +3207,100 @@ static bool trans_addil(DisasContext *ctx, arg_addil *a) static bool trans_ldo(DisasContext *ctx, arg_ldo *a) { - TCGv_reg tcg_rt = dest_gpr(ctx, a->t); + TCGv_i64 tcg_rt = dest_gpr(ctx, a->t); /* Special case rb == 0, for the LDI pseudo-op. - The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */ + The COPY pseudo-op is handled for free within tcg_gen_addi_i64. 
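+ (LDI i,t assembles as LDO i(r0),t and COPY r,t as LDO 0(r),t;
+ with i == 0 the addi below folds to a plain register move.)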
*/ if (a->b == 0) { - tcg_gen_movi_reg(tcg_rt, a->i); + tcg_gen_movi_i64(tcg_rt, a->i); } else { - tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i); + tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i); } save_gpr(ctx, a->t, tcg_rt); cond_free(&ctx->null_cond); return true; } -static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1, - unsigned c, unsigned f, unsigned n, int disp) +static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1, + unsigned c, unsigned f, bool d, unsigned n, int disp) { - TCGv_reg dest, in2, sv; + TCGv_i64 dest, in2, sv; DisasCond cond; in2 = load_gpr(ctx, r); - dest = get_temp(ctx); + dest = tcg_temp_new_i64(); - tcg_gen_sub_reg(dest, in1, in2); + tcg_gen_sub_i64(dest, in1, in2); sv = NULL; if (cond_need_sv(c)) { sv = do_sub_sv(ctx, dest, in1, in2); } - cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv); + cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv); return do_cbranch(ctx, disp, n, &cond); } static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a) { + if (!ctx->is_pa20 && a->d) { + return false; + } nullify_over(ctx); - return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); + return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), + a->c, a->f, a->d, a->n, a->disp); } static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a) { + if (!ctx->is_pa20 && a->d) { + return false; + } nullify_over(ctx); - return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp); + return do_cmpb(ctx, a->r, tcg_constant_i64(a->i), + a->c, a->f, a->d, a->n, a->disp); } -static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1, +static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1, unsigned c, unsigned f, unsigned n, int disp) { - TCGv_reg dest, in2, sv, cb_msb; + TCGv_i64 dest, in2, sv, cb_cond; DisasCond cond; + bool d = false; + + /* + * For hppa64, the ADDB conditions change with PSW.W, + * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE. 
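+ * Concretely, encodings c = 5, 6, 7 (ZNV, SV, OD) are remapped
+ * below to c = 1, 2, 3 with d = true, i.e. 64-bit EQ, LT, LE.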
+ */ + if (ctx->tb_flags & PSW_W) { + d = c >= 5; + if (d) { + c &= 3; + } + } in2 = load_gpr(ctx, r); - dest = tcg_temp_new(); + dest = tcg_temp_new_i64(); sv = NULL; - cb_msb = NULL; + cb_cond = NULL; if (cond_need_cb(c)) { - cb_msb = get_temp(ctx); - tcg_gen_movi_reg(cb_msb, 0); - tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb); + TCGv_i64 cb = tcg_temp_new_i64(); + TCGv_i64 cb_msb = tcg_temp_new_i64(); + + tcg_gen_movi_i64(cb_msb, 0); + tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb); + tcg_gen_xor_i64(cb, in1, in2); + tcg_gen_xor_i64(cb, cb, dest); + cb_cond = get_carry(ctx, d, cb, cb_msb); } else { - tcg_gen_add_reg(dest, in1, in2); + tcg_gen_add_i64(dest, in1, in2); } if (cond_need_sv(c)) { sv = do_add_sv(ctx, dest, in1, in2); } - cond = do_cond(c * 2 + f, dest, cb_msb, sv); + cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv); save_gpr(ctx, r, dest); return do_cbranch(ctx, disp, n, &cond); } @@ -3053,34 +3314,42 @@ static bool trans_addb(DisasContext *ctx, arg_addb *a) static bool trans_addbi(DisasContext *ctx, arg_addbi *a) { nullify_over(ctx); - return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp); + return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp); } static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) { - TCGv_reg tmp, tcg_r; + TCGv_i64 tmp, tcg_r; DisasCond cond; nullify_over(ctx); - tmp = tcg_temp_new(); + tmp = tcg_temp_new_i64(); tcg_r = load_gpr(ctx, a->r); - tcg_gen_shl_reg(tmp, tcg_r, cpu_sar); + if (cond_need_ext(ctx, a->d)) { + /* Force shift into [32,63] */ + tcg_gen_ori_i64(tmp, cpu_sar, 32); + tcg_gen_shl_i64(tmp, tcg_r, tmp); + } else { + tcg_gen_shl_i64(tmp, tcg_r, cpu_sar); + } - cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); + cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); return do_cbranch(ctx, a->disp, a->n, &cond); } static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) { - TCGv_reg tmp, tcg_r; + TCGv_i64 tmp, tcg_r; DisasCond cond; + int p; nullify_over(ctx); - tmp = tcg_temp_new(); + tmp = tcg_temp_new_i64(); tcg_r = load_gpr(ctx, a->r); - tcg_gen_shli_reg(tmp, tcg_r, a->p); + p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0); + tcg_gen_shli_i64(tmp, tcg_r, p); cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); return do_cbranch(ctx, a->disp, a->n, &cond); @@ -3088,178 +3357,246 @@ static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) static bool trans_movb(DisasContext *ctx, arg_movb *a) { - TCGv_reg dest; + TCGv_i64 dest; DisasCond cond; nullify_over(ctx); dest = dest_gpr(ctx, a->r2); if (a->r1 == 0) { - tcg_gen_movi_reg(dest, 0); + tcg_gen_movi_i64(dest, 0); } else { - tcg_gen_mov_reg(dest, cpu_gr[a->r1]); + tcg_gen_mov_i64(dest, cpu_gr[a->r1]); } - cond = do_sed_cond(a->c, dest); + /* All MOVB conditions are 32-bit. */ + cond = do_sed_cond(ctx, a->c, false, dest); return do_cbranch(ctx, a->disp, a->n, &cond); } static bool trans_movbi(DisasContext *ctx, arg_movbi *a) { - TCGv_reg dest; + TCGv_i64 dest; DisasCond cond; nullify_over(ctx); dest = dest_gpr(ctx, a->r); - tcg_gen_movi_reg(dest, a->i); + tcg_gen_movi_i64(dest, a->i); - cond = do_sed_cond(a->c, dest); + /* All MOVBI conditions are 32-bit. 
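+ (unlike ADDB above there is no PSW.W remap; the condition is
+ always evaluated at word width, hence the hard-coded false below.)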
*/ + cond = do_sed_cond(ctx, a->c, false, dest); return do_cbranch(ctx, a->disp, a->n, &cond); } -static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a) +static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a) { - TCGv_reg dest; + TCGv_i64 dest, src2; + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } dest = dest_gpr(ctx, a->t); + src2 = load_gpr(ctx, a->r2); if (a->r1 == 0) { - tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2)); - tcg_gen_shr_reg(dest, dest, cpu_sar); + if (a->d) { + tcg_gen_shr_i64(dest, src2, cpu_sar); + } else { + TCGv_i64 tmp = tcg_temp_new_i64(); + + tcg_gen_ext32u_i64(dest, src2); + tcg_gen_andi_i64(tmp, cpu_sar, 31); + tcg_gen_shr_i64(dest, dest, tmp); + } } else if (a->r1 == a->r2) { - TCGv_i32 t32 = tcg_temp_new_i32(); - tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2)); - tcg_gen_rotr_i32(t32, t32, cpu_sar); - tcg_gen_extu_i32_reg(dest, t32); + if (a->d) { + tcg_gen_rotr_i64(dest, src2, cpu_sar); + } else { + TCGv_i32 t32 = tcg_temp_new_i32(); + TCGv_i32 s32 = tcg_temp_new_i32(); + + tcg_gen_extrl_i64_i32(t32, src2); + tcg_gen_extrl_i64_i32(s32, cpu_sar); + tcg_gen_andi_i32(s32, s32, 31); + tcg_gen_rotr_i32(t32, t32, s32); + tcg_gen_extu_i32_i64(dest, t32); + } } else { - TCGv_i64 t = tcg_temp_new_i64(); - TCGv_i64 s = tcg_temp_new_i64(); + TCGv_i64 src1 = load_gpr(ctx, a->r1); + + if (a->d) { + TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 n = tcg_temp_new_i64(); + + tcg_gen_xori_i64(n, cpu_sar, 63); + tcg_gen_shl_i64(t, src2, n); + tcg_gen_shli_i64(t, t, 1); + tcg_gen_shr_i64(dest, src1, cpu_sar); + tcg_gen_or_i64(dest, dest, t); + } else { + TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 s = tcg_temp_new_i64(); - tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1)); - tcg_gen_extu_reg_i64(s, cpu_sar); - tcg_gen_shr_i64(t, t, s); - tcg_gen_trunc_i64_reg(dest, t); + tcg_gen_concat32_i64(t, src2, src1); + tcg_gen_andi_i64(s, cpu_sar, 31); + tcg_gen_shr_i64(dest, t, s); + } } save_gpr(ctx, a->t, dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(a->c, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); } return nullify_end(ctx); } -static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a) +static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a) { - unsigned sa = 31 - a->cpos; - TCGv_reg dest, t2; + unsigned width, sa; + TCGv_i64 dest, t2; + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } + width = a->d ? 
64 : 32; + sa = width - 1 - a->cpos; + dest = dest_gpr(ctx, a->t); t2 = load_gpr(ctx, a->r2); if (a->r1 == 0) { - tcg_gen_extract_reg(dest, t2, sa, 32 - sa); - } else if (TARGET_REGISTER_BITS == 32) { - tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa); - } else if (a->r1 == a->r2) { - TCGv_i32 t32 = tcg_temp_new_i32(); - tcg_gen_trunc_reg_i32(t32, t2); - tcg_gen_rotri_i32(t32, t32, sa); - tcg_gen_extu_i32_reg(dest, t32); + tcg_gen_extract_i64(dest, t2, sa, width - sa); + } else if (width == TARGET_LONG_BITS) { + tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa); } else { - TCGv_i64 t64 = tcg_temp_new_i64(); - tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]); - tcg_gen_shri_i64(t64, t64, sa); - tcg_gen_trunc_i64_reg(dest, t64); + assert(!a->d); + if (a->r1 == a->r2) { + TCGv_i32 t32 = tcg_temp_new_i32(); + tcg_gen_extrl_i64_i32(t32, t2); + tcg_gen_rotri_i32(t32, t32, sa); + tcg_gen_extu_i32_i64(dest, t32); + } else { + tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]); + tcg_gen_extract_i64(dest, dest, sa, 32); + } } save_gpr(ctx, a->t, dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(a->c, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); } return nullify_end(ctx); } -static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a) +static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a) { - unsigned len = 32 - a->clen; - TCGv_reg dest, src, tmp; + unsigned widthm1 = a->d ? 63 : 31; + TCGv_i64 dest, src, tmp; + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } dest = dest_gpr(ctx, a->t); src = load_gpr(ctx, a->r); - tmp = tcg_temp_new(); + tmp = tcg_temp_new_i64(); /* Recall that SAR is using big-endian bit numbering. */ - tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1); + tcg_gen_andi_i64(tmp, cpu_sar, widthm1); + tcg_gen_xori_i64(tmp, tmp, widthm1); + if (a->se) { - tcg_gen_sar_reg(dest, src, tmp); - tcg_gen_sextract_reg(dest, dest, 0, len); + if (!a->d) { + tcg_gen_ext32s_i64(dest, src); + src = dest; + } + tcg_gen_sar_i64(dest, src, tmp); + tcg_gen_sextract_i64(dest, dest, 0, a->len); } else { - tcg_gen_shr_reg(dest, src, tmp); - tcg_gen_extract_reg(dest, dest, 0, len); + if (!a->d) { + tcg_gen_ext32u_i64(dest, src); + src = dest; + } + tcg_gen_shr_i64(dest, src, tmp); + tcg_gen_extract_i64(dest, dest, 0, a->len); } save_gpr(ctx, a->t, dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(a->c, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); } return nullify_end(ctx); } -static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a) +static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a) { - unsigned len = 32 - a->clen; - unsigned cpos = 31 - a->pos; - TCGv_reg dest, src; + unsigned len, cpos, width; + TCGv_i64 dest, src; + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } + len = a->len; + width = a->d ? 64 : 32; + cpos = width - 1 - a->pos; + if (cpos + len > width) { + len = width - cpos; + } + dest = dest_gpr(ctx, a->t); src = load_gpr(ctx, a->r); if (a->se) { - tcg_gen_sextract_reg(dest, src, cpos, len); + tcg_gen_sextract_i64(dest, src, cpos, len); } else { - tcg_gen_extract_reg(dest, src, cpos, len); + tcg_gen_extract_i64(dest, src, cpos, len); } save_gpr(ctx, a->t, dest); /* Install the new nullification. 
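   (A worked example of the field selection above: EXTRW,U r,31,8,t
   has pos = 31, so cpos = 0 and len = 8 -- the low byte of r is
   zero-extended into t; the signed form uses sextract instead.)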
*/ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(a->c, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); } return nullify_end(ctx); } -static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a) +static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a) { - unsigned len = 32 - a->clen; - target_sreg mask0, mask1; - TCGv_reg dest; + unsigned len, width; + uint64_t mask0, mask1; + TCGv_i64 dest; + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } - if (a->cpos + len > 32) { - len = 32 - a->cpos; + + len = a->len; + width = a->d ? 64 : 32; + if (a->cpos + len > width) { + len = width - a->cpos; } dest = dest_gpr(ctx, a->t); @@ -3267,110 +3604,122 @@ static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a) mask1 = deposit64(-1, a->cpos, len, a->i); if (a->nz) { - TCGv_reg src = load_gpr(ctx, a->t); - if (mask1 != -1) { - tcg_gen_andi_reg(dest, src, mask1); - src = dest; - } - tcg_gen_ori_reg(dest, src, mask0); + TCGv_i64 src = load_gpr(ctx, a->t); + tcg_gen_andi_i64(dest, src, mask1); + tcg_gen_ori_i64(dest, dest, mask0); } else { - tcg_gen_movi_reg(dest, mask0); + tcg_gen_movi_i64(dest, mask0); } save_gpr(ctx, a->t, dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(a->c, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); } return nullify_end(ctx); } -static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a) +static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a) { unsigned rs = a->nz ? a->t : 0; - unsigned len = 32 - a->clen; - TCGv_reg dest, val; + unsigned len, width; + TCGv_i64 dest, val; + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } - if (a->cpos + len > 32) { - len = 32 - a->cpos; + + len = a->len; + width = a->d ? 64 : 32; + if (a->cpos + len > width) { + len = width - a->cpos; } dest = dest_gpr(ctx, a->t); val = load_gpr(ctx, a->r); if (rs == 0) { - tcg_gen_deposit_z_reg(dest, val, a->cpos, len); + tcg_gen_deposit_z_i64(dest, val, a->cpos, len); } else { - tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len); + tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len); } save_gpr(ctx, a->t, dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); if (a->c) { - ctx->null_cond = do_sed_cond(a->c, dest); + ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); } return nullify_end(ctx); } -static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c, - unsigned nz, unsigned clen, TCGv_reg val) +static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c, + bool d, bool nz, unsigned len, TCGv_i64 val) { unsigned rs = nz ? rt : 0; - unsigned len = 32 - clen; - TCGv_reg mask, tmp, shift, dest; - unsigned msb = 1U << (len - 1); + unsigned widthm1 = d ? 63 : 31; + TCGv_i64 mask, tmp, shift, dest; + uint64_t msb = 1ULL << (len - 1); dest = dest_gpr(ctx, rt); - shift = tcg_temp_new(); - tmp = tcg_temp_new(); + shift = tcg_temp_new_i64(); + tmp = tcg_temp_new_i64(); /* Convert big-endian bit numbering in SAR to left-shift. 
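   For 0 <= s <= widthm1, the and/xor pair below computes
   (s & widthm1) ^ widthm1 == widthm1 - s, turning SAR's big-endian
   bit position p into a left shift of 31 - p (63 - p when d is set).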
*/ - tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1); + tcg_gen_andi_i64(shift, cpu_sar, widthm1); + tcg_gen_xori_i64(shift, shift, widthm1); - mask = tcg_temp_new(); - tcg_gen_movi_reg(mask, msb + (msb - 1)); - tcg_gen_and_reg(tmp, val, mask); + mask = tcg_temp_new_i64(); + tcg_gen_movi_i64(mask, msb + (msb - 1)); + tcg_gen_and_i64(tmp, val, mask); if (rs) { - tcg_gen_shl_reg(mask, mask, shift); - tcg_gen_shl_reg(tmp, tmp, shift); - tcg_gen_andc_reg(dest, cpu_gr[rs], mask); - tcg_gen_or_reg(dest, dest, tmp); + tcg_gen_shl_i64(mask, mask, shift); + tcg_gen_shl_i64(tmp, tmp, shift); + tcg_gen_andc_i64(dest, cpu_gr[rs], mask); + tcg_gen_or_i64(dest, dest, tmp); } else { - tcg_gen_shl_reg(dest, tmp, shift); + tcg_gen_shl_i64(dest, tmp, shift); } save_gpr(ctx, rt, dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); if (c) { - ctx->null_cond = do_sed_cond(c, dest); + ctx->null_cond = do_sed_cond(ctx, c, d, dest); } return nullify_end(ctx); } -static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a) +static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a) { + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } - return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r)); + return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len, + load_gpr(ctx, a->r)); } -static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a) +static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a) { + if (!ctx->is_pa20 && a->d) { + return false; + } if (a->c) { nullify_over(ctx); } - return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i)); + return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len, + tcg_constant_i64(a->i)); } static bool trans_be(DisasContext *ctx, arg_be *a) { - TCGv_reg tmp; + TCGv_i64 tmp; #ifdef CONFIG_USER_ONLY /* ??? 
It seems like there should be a good way of using @@ -3388,8 +3737,8 @@ static bool trans_be(DisasContext *ctx, arg_be *a) nullify_over(ctx); #endif - tmp = get_temp(ctx); - tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp); + tmp = tcg_temp_new_i64(); + tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp); tmp = do_ibranch_priv(ctx, tmp); #ifdef CONFIG_USER_ONLY @@ -3399,20 +3748,21 @@ static bool trans_be(DisasContext *ctx, arg_be *a) load_spr(ctx, new_spc, a->sp); if (a->l) { - copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var); tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f); } if (a->n && use_nullify_skip(ctx)) { - tcg_gen_mov_reg(cpu_iaoq_f, tmp); - tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4); + copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp); + tcg_gen_addi_i64(tmp, tmp, 4); + copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); tcg_gen_mov_i64(cpu_iasq_f, new_spc); tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f); } else { - copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); + copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); if (ctx->iaoq_b == -1) { tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); } - tcg_gen_mov_reg(cpu_iaoq_b, tmp); + copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); tcg_gen_mov_i64(cpu_iasq_b, new_spc); nullify_set(ctx, a->n); } @@ -3429,7 +3779,7 @@ static bool trans_bl(DisasContext *ctx, arg_bl *a) static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) { - target_ureg dest = iaoq_dest(ctx, a->disp); + uint64_t dest = iaoq_dest(ctx, a->disp); nullify_over(ctx); @@ -3471,11 +3821,11 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) #endif if (a->l) { - TCGv_reg tmp = dest_gpr(ctx, a->l); + TCGv_i64 tmp = dest_gpr(ctx, a->l); if (ctx->privilege < 3) { - tcg_gen_andi_reg(tmp, tmp, -4); + tcg_gen_andi_i64(tmp, tmp, -4); } - tcg_gen_ori_reg(tmp, tmp, ctx->privilege); + tcg_gen_ori_i64(tmp, tmp, ctx->privilege); save_gpr(ctx, a->l, tmp); } @@ -3485,9 +3835,9 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) static bool trans_blr(DisasContext *ctx, arg_blr *a) { if (a->x) { - TCGv_reg tmp = get_temp(ctx); - tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3); - tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8); + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3); + tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8); /* The computation here never changes privilege level. 
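   (The target is simply IAOQ_Front + 8 + GR[x] * 8, a table jump
   within the current space, so no do_ibranch_priv adjustment of the
   privilege bits is needed.)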
*/ return do_ibranch(ctx, tmp, a->l, a->n); } else { @@ -3498,14 +3848,14 @@ static bool trans_blr(DisasContext *ctx, arg_blr *a) static bool trans_bv(DisasContext *ctx, arg_bv *a) { - TCGv_reg dest; + TCGv_i64 dest; if (a->x == 0) { dest = load_gpr(ctx, a->b); } else { - dest = get_temp(ctx); - tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3); - tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b)); + dest = tcg_temp_new_i64(); + tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3); + tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b)); } dest = do_ibranch_priv(ctx, dest); return do_ibranch(ctx, dest, 0, a->n); @@ -3513,7 +3863,7 @@ static bool trans_bv(DisasContext *ctx, arg_bv *a) static bool trans_bve(DisasContext *ctx, arg_bve *a) { - TCGv_reg dest; + TCGv_i64 dest; #ifdef CONFIG_USER_ONLY dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); @@ -3522,14 +3872,14 @@ static bool trans_bve(DisasContext *ctx, arg_bve *a) nullify_over(ctx); dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); - copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); + copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); if (ctx->iaoq_b == -1) { tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); } - copy_iaoq_entry(cpu_iaoq_b, -1, dest); + copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest); tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest)); if (a->l) { - copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var); } nullify_set(ctx, a->n); tcg_gen_lookup_and_goto_ptr(); @@ -3538,6 +3888,12 @@ static bool trans_bve(DisasContext *ctx, arg_bve *a) #endif } +static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a) +{ + /* All branch target stack instructions implement as nop. */ + return ctx->is_pa20; +} + /* * Float class 0 */ @@ -3551,7 +3907,7 @@ static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a) { uint64_t ret; - if (TARGET_REGISTER_BITS == 64) { + if (ctx->is_pa20) { ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */ } else { ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */ @@ -3830,12 +4186,12 @@ static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a) static bool trans_ftest(DisasContext *ctx, arg_ftest *a) { - TCGv_reg t; + TCGv_i64 t; nullify_over(ctx); - t = get_temp(ctx); - tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow)); + t = tcg_temp_new_i64(); + tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow)); if (a->y == 1) { int mask; @@ -3843,7 +4199,7 @@ static bool trans_ftest(DisasContext *ctx, arg_ftest *a) switch (a->c) { case 0: /* simple */ - tcg_gen_andi_reg(t, t, 0x4000000); + tcg_gen_andi_i64(t, t, 0x4000000); ctx->null_cond = cond_make_0(TCG_COND_NE, t); goto done; case 2: /* rej */ @@ -3872,17 +4228,17 @@ static bool trans_ftest(DisasContext *ctx, arg_ftest *a) return true; } if (inv) { - TCGv_reg c = load_const(ctx, mask); - tcg_gen_or_reg(t, t, c); + TCGv_i64 c = tcg_constant_i64(mask); + tcg_gen_or_i64(t, t, c); ctx->null_cond = cond_make(TCG_COND_EQ, t, c); } else { - tcg_gen_andi_reg(t, t, mask); + tcg_gen_andi_i64(t, t, mask); ctx->null_cond = cond_make_0(TCG_COND_EQ, t); } } else { unsigned cbit = (a->y ^ 1) - 1; - tcg_gen_extract_reg(t, t, 21 - cbit, 1); + tcg_gen_extract_i64(t, t, 21 - cbit, 1); ctx->null_cond = cond_make_0(TCG_COND_NE, t); } @@ -4062,6 +4418,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->cs = cs; ctx->tb_flags = ctx->base.tb->flags; + ctx->is_pa20 = hppa_is_pa20(cpu_env(cs)); #ifdef CONFIG_USER_ONLY ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX); 
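/*
 * A note on the pattern: the is_pa20 flag captured above is what the
 * width checks throughout this file key off, via the recurring guard
 *
 *     if (!ctx->is_pa20 && a->d) {
 *         return false;
 *     }
 *
 * so a PA2.0-only encoding simply fails to decode on PA1.x CPUs and
 * is reported as an illegal instruction.
 */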
@@ -4071,8 +4428,9 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); #else ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3; - ctx->mmu_idx = (ctx->tb_flags & PSW_D ? - PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX); + ctx->mmu_idx = (ctx->tb_flags & PSW_D + ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P) + : MMU_PHYS_IDX); /* Recover the IAOQ values from the GVA + PRIV. */ uint64_t cs_base = ctx->base.tb->cs_base; @@ -4085,14 +4443,11 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->iaoq_n = -1; ctx->iaoq_n_var = NULL; + ctx->zero = tcg_constant_i64(0); + /* Bound the number of instructions by those left on the page. */ bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; ctx->base.max_insns = MIN(ctx->base.max_insns, bound); - - ctx->ntempr = 0; - ctx->ntempl = 0; - memset(ctx->tempr, 0, sizeof(ctx->tempr)); - memset(ctx->templ, 0, sizeof(ctx->templ)); } static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs) @@ -4113,7 +4468,8 @@ static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); - tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b); + tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0); + ctx->insn_start = tcg_last_op(); } static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) @@ -4121,7 +4477,6 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) DisasContext *ctx = container_of(dcbase, DisasContext, base); CPUHPPAState *env = cpu_env(cs); DisasJumpType ret; - int i, n; /* Execute one insn. */ #ifdef CONFIG_USER_ONLY @@ -4140,8 +4495,8 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) This will be overwritten by a branch. */ if (ctx->iaoq_b == -1) { ctx->iaoq_n = -1; - ctx->iaoq_n_var = get_temp(ctx); - tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4); + ctx->iaoq_n_var = tcg_temp_new_i64(); + tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4); } else { ctx->iaoq_n = ctx->iaoq_b + 4; ctx->iaoq_n_var = NULL; @@ -4160,16 +4515,6 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) } } - /* Forget any temporaries allocated. */ - for (i = 0, n = ctx->ntempr; i < n; ++i) { - ctx->tempr[i] = NULL; - } - for (i = 0, n = ctx->ntempl; i < n; ++i) { - ctx->templ[i] = NULL; - } - ctx->ntempr = 0; - ctx->ntempl = 0; - /* Advance the insn queue. Note that this check also detects a priority change within the instruction queue. */ if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) { @@ -4197,8 +4542,8 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) case DISAS_IAQ_N_STALE: case DISAS_IAQ_N_STALE_EXIT: if (ctx->iaoq_f == -1) { - tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b); - copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b); + copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); #ifndef CONFIG_USER_ONLY tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); #endif @@ -4207,7 +4552,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ? 
DISAS_EXIT : DISAS_IAQ_N_UPDATED); } else if (ctx->iaoq_b == -1) { - tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var); + copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var); } break; @@ -4227,8 +4572,8 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) case DISAS_TOO_MANY: case DISAS_IAQ_N_STALE: case DISAS_IAQ_N_STALE_EXIT: - copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); - copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); + copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); + copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); nullify_save(ctx); /* FALLTHRU */ case DISAS_IAQ_N_UPDATED: diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h index 2350f4ae60..d4e216d000 100644 --- a/target/i386/cpu-qom.h +++ b/target/i386/cpu-qom.h @@ -21,8 +21,6 @@ #define QEMU_I386_CPU_QOM_H #include "hw/core/cpu.h" -#include "qemu/notify.h" -#include "qom/object.h" #ifdef TARGET_X86_64 #define TYPE_X86_CPU "x86_64-cpu" @@ -32,43 +30,7 @@ OBJECT_DECLARE_CPU_TYPE(X86CPU, X86CPUClass, X86_CPU) -typedef struct X86CPUModel X86CPUModel; - -/** - * X86CPUClass: - * @cpu_def: CPU model definition - * @host_cpuid_required: Whether CPU model requires cpuid from host. - * @ordering: Ordering on the "-cpu help" CPU model list. - * @migration_safe: See CpuDefinitionInfo::migration_safe - * @static_model: See CpuDefinitionInfo::static - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * An x86 CPU model or family. - */ -struct X86CPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - /* CPU definition, automatically loaded by instance_init if not NULL. - * Should be eventually replaced by subclass-specific property defaults. - */ - X86CPUModel *model; - - bool host_cpuid_required; - int ordering; - bool migration_safe; - bool static_model; - - /* Optional description of CPU model. 
- * If unavailable, cpu_def->model_id is used */ - const char *model_description; - - DeviceRealize parent_realize; - DeviceUnrealize parent_unrealize; - ResettablePhases parent_phases; -}; - +#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU +#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) #endif diff --git a/target/i386/cpu.c b/target/i386/cpu.c index fc8484cb5e..358d9c0a65 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -6019,7 +6019,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, X86CPUTopoInfo topo_info; topo_info.dies_per_pkg = env->nr_dies; - topo_info.cores_per_die = cs->nr_cores; + topo_info.cores_per_die = cs->nr_cores / env->nr_dies; topo_info.threads_per_core = cs->nr_threads; /* Calculate & apply limits for different index ranges */ @@ -6095,8 +6095,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, */ if (*eax & 31) { int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14); - int vcpus_per_socket = env->nr_dies * cs->nr_cores * - cs->nr_threads; + int vcpus_per_socket = cs->nr_cores * cs->nr_threads; if (cs->nr_cores > 1) { *eax &= ~0xFC000000; *eax |= (pow2ceil(cs->nr_cores) - 1) << 26; @@ -6273,12 +6272,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 1: *eax = apicid_die_offset(&topo_info); - *ebx = cs->nr_cores * cs->nr_threads; + *ebx = topo_info.cores_per_die * topo_info.threads_per_core; *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; break; case 2: *eax = apicid_pkg_offset(&topo_info); - *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; + *ebx = cs->nr_cores * cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; break; default: diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 471e71dbc5..cd2e295bd6 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1882,6 +1882,7 @@ typedef struct CPUArchState { TPRAccess tpr_access_type; + /* Number of dies within this CPU package. */ unsigned nr_dies; } CPUX86State; @@ -1897,9 +1898,7 @@ struct kvm_msrs; * An x86 CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUX86State env; VMChangeStateEntry *vmsentry; @@ -2039,6 +2038,44 @@ struct ArchCPU { bool xen_vapic; }; +typedef struct X86CPUModel X86CPUModel; + +/** + * X86CPUClass: + * @cpu_def: CPU model definition + * @host_cpuid_required: Whether CPU model requires cpuid from host. + * @ordering: Ordering on the "-cpu help" CPU model list. + * @migration_safe: See CpuDefinitionInfo::migration_safe + * @static_model: See CpuDefinitionInfo::static + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * An x86 CPU model or family. + */ +struct X86CPUClass { + CPUClass parent_class; + + /* + * CPU definition, automatically loaded by instance_init if not NULL. + * Should be eventually replaced by subclass-specific property defaults. + */ + X86CPUModel *model; + + bool host_cpuid_required; + int ordering; + bool migration_safe; + bool static_model; + + /* + * Optional description of CPU model. + * If unavailable, cpu_def->model_id is used. 
+ */ + const char *model_description; + + DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; + ResettablePhases parent_phases; +}; #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_x86_cpu; @@ -2241,8 +2278,6 @@ void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); /* hw/pc.c */ uint64_t cpu_get_tsc(CPUX86State *env); -#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU -#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_X86_CPU #ifdef TARGET_X86_64 diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index cb2cd0b02f..20b9ca3ef5 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -591,9 +591,9 @@ int hvf_vcpu_exec(CPUState *cpu) { load_regs(cpu); if (exit_reason == EXIT_REASON_RDMSR) { - simulate_rdmsr(cpu); + simulate_rdmsr(env); } else { - simulate_wrmsr(cpu); + simulate_wrmsr(env); } env->eip += ins_len; store_regs(cpu); diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c index ccda568478..3a3f0a50d0 100644 --- a/target/i386/hvf/x86_emu.c +++ b/target/i386/hvf/x86_emu.c @@ -45,7 +45,7 @@ #include "vmcs.h" #include "vmx.h" -void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, +void hvf_handle_io(CPUState *cs, uint16_t port, void *data, int direction, int size, uint32_t count); #define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \ @@ -663,35 +663,34 @@ static void exec_lods(CPUX86State *env, struct x86_decode *decode) env->eip += decode->len; } -void simulate_rdmsr(struct CPUState *cpu) +void simulate_rdmsr(CPUX86State *env) { - X86CPU *x86_cpu = X86_CPU(cpu); - CPUX86State *env = &x86_cpu->env; + X86CPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); uint32_t msr = ECX(env); uint64_t val = 0; switch (msr) { case MSR_IA32_TSC: - val = rdtscp() + rvmcs(cpu->accel->fd, VMCS_TSC_OFFSET); + val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET); break; case MSR_IA32_APICBASE: - val = cpu_get_apic_base(X86_CPU(cpu)->apic_state); + val = cpu_get_apic_base(cpu->apic_state); break; case MSR_IA32_UCODE_REV: - val = x86_cpu->ucode_rev; + val = cpu->ucode_rev; break; case MSR_EFER: - val = rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER); + val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER); break; case MSR_FSBASE: - val = rvmcs(cpu->accel->fd, VMCS_GUEST_FS_BASE); + val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE); break; case MSR_GSBASE: - val = rvmcs(cpu->accel->fd, VMCS_GUEST_GS_BASE); + val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE); break; case MSR_KERNELGSBASE: - val = rvmcs(cpu->accel->fd, VMCS_HOST_FS_BASE); + val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE); break; case MSR_STAR: abort(); @@ -746,7 +745,7 @@ void simulate_rdmsr(struct CPUState *cpu) val = env->mtrr_deftype; break; case MSR_CORE_THREAD_COUNT: - val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ + val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ break; default: @@ -761,14 +760,14 @@ void simulate_rdmsr(struct CPUState *cpu) static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode) { - simulate_rdmsr(env_cpu(env)); + simulate_rdmsr(env); env->eip += decode->len; } -void simulate_wrmsr(struct CPUState *cpu) +void simulate_wrmsr(CPUX86State *env) { - X86CPU *x86_cpu = X86_CPU(cpu); - CPUX86State *env = &x86_cpu->env; + X86CPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); uint32_t msr = ECX(env); uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env); @@ -776,16 +775,16 @@ void 
simulate_wrmsr(struct CPUState *cpu) case MSR_IA32_TSC: break; case MSR_IA32_APICBASE: - cpu_set_apic_base(X86_CPU(cpu)->apic_state, data); + cpu_set_apic_base(cpu->apic_state, data); break; case MSR_FSBASE: - wvmcs(cpu->accel->fd, VMCS_GUEST_FS_BASE, data); + wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data); break; case MSR_GSBASE: - wvmcs(cpu->accel->fd, VMCS_GUEST_GS_BASE, data); + wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data); break; case MSR_KERNELGSBASE: - wvmcs(cpu->accel->fd, VMCS_HOST_FS_BASE, data); + wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data); break; case MSR_STAR: abort(); @@ -797,10 +796,10 @@ void simulate_wrmsr(struct CPUState *cpu) abort(); break; case MSR_EFER: - /*printf("new efer %llx\n", EFER(cpu));*/ - wvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER, data); + /*printf("new efer %llx\n", EFER(cs));*/ + wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data); if (data & MSR_EFER_NXE) { - hv_vcpu_invalidate_tlb(cpu->accel->fd); + hv_vcpu_invalidate_tlb(cs->accel->fd); } break; case MSR_MTRRphysBase(0): @@ -849,14 +848,14 @@ void simulate_wrmsr(struct CPUState *cpu) /* Related to support known hypervisor interface */ /* if (g_hypervisor_iface) - g_hypervisor_iface->wrmsr_handler(cpu, msr, data); + g_hypervisor_iface->wrmsr_handler(cs, msr, data); - printf("write msr %llx\n", RCX(cpu));*/ + printf("write msr %llx\n", RCX(cs));*/ } static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode) { - simulate_wrmsr(env_cpu(env)); + simulate_wrmsr(env); env->eip += decode->len; } @@ -1418,56 +1417,56 @@ static void init_cmd_handler() } } -void load_regs(struct CPUState *cpu) +void load_regs(CPUState *cs) { - X86CPU *x86_cpu = X86_CPU(cpu); - CPUX86State *env = &x86_cpu->env; + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; int i = 0; - RRX(env, R_EAX) = rreg(cpu->accel->fd, HV_X86_RAX); - RRX(env, R_EBX) = rreg(cpu->accel->fd, HV_X86_RBX); - RRX(env, R_ECX) = rreg(cpu->accel->fd, HV_X86_RCX); - RRX(env, R_EDX) = rreg(cpu->accel->fd, HV_X86_RDX); - RRX(env, R_ESI) = rreg(cpu->accel->fd, HV_X86_RSI); - RRX(env, R_EDI) = rreg(cpu->accel->fd, HV_X86_RDI); - RRX(env, R_ESP) = rreg(cpu->accel->fd, HV_X86_RSP); - RRX(env, R_EBP) = rreg(cpu->accel->fd, HV_X86_RBP); + RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX); + RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX); + RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX); + RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX); + RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI); + RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI); + RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP); + RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP); for (i = 8; i < 16; i++) { - RRX(env, i) = rreg(cpu->accel->fd, HV_X86_RAX + i); + RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i); } - env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); + env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); rflags_to_lflags(env); - env->eip = rreg(cpu->accel->fd, HV_X86_RIP); + env->eip = rreg(cs->accel->fd, HV_X86_RIP); } -void store_regs(struct CPUState *cpu) +void store_regs(CPUState *cs) { - X86CPU *x86_cpu = X86_CPU(cpu); - CPUX86State *env = &x86_cpu->env; + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; int i = 0; - wreg(cpu->accel->fd, HV_X86_RAX, RAX(env)); - wreg(cpu->accel->fd, HV_X86_RBX, RBX(env)); - wreg(cpu->accel->fd, HV_X86_RCX, RCX(env)); - wreg(cpu->accel->fd, HV_X86_RDX, RDX(env)); - wreg(cpu->accel->fd, HV_X86_RSI, RSI(env)); - wreg(cpu->accel->fd, HV_X86_RDI, RDI(env)); - wreg(cpu->accel->fd, HV_X86_RBP, RBP(env)); - 
wreg(cpu->accel->fd, HV_X86_RSP, RSP(env)); + wreg(cs->accel->fd, HV_X86_RAX, RAX(env)); + wreg(cs->accel->fd, HV_X86_RBX, RBX(env)); + wreg(cs->accel->fd, HV_X86_RCX, RCX(env)); + wreg(cs->accel->fd, HV_X86_RDX, RDX(env)); + wreg(cs->accel->fd, HV_X86_RSI, RSI(env)); + wreg(cs->accel->fd, HV_X86_RDI, RDI(env)); + wreg(cs->accel->fd, HV_X86_RBP, RBP(env)); + wreg(cs->accel->fd, HV_X86_RSP, RSP(env)); for (i = 8; i < 16; i++) { - wreg(cpu->accel->fd, HV_X86_RAX + i, RRX(env, i)); + wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i)); } lflags_to_rflags(env); - wreg(cpu->accel->fd, HV_X86_RFLAGS, env->eflags); - macvm_set_rip(cpu, env->eip); + wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags); + macvm_set_rip(cs, env->eip); } bool exec_instruction(CPUX86State *env, struct x86_decode *ins) { - /*if (hvf_vcpu_id(cpu)) - printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), env->eip, + /*if (hvf_vcpu_id(cs)) + printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cs), env->eip, decode_cmd_to_string(ins->cmd));*/ if (!_cmd_handler[ins->cmd].handler) { diff --git a/target/i386/hvf/x86_emu.h b/target/i386/hvf/x86_emu.h index 640da90b30..4b846ba80e 100644 --- a/target/i386/hvf/x86_emu.h +++ b/target/i386/hvf/x86_emu.h @@ -29,8 +29,8 @@ bool exec_instruction(CPUX86State *env, struct x86_decode *ins); void load_regs(struct CPUState *cpu); void store_regs(struct CPUState *cpu); -void simulate_rdmsr(struct CPUState *cpu); -void simulate_wrmsr(struct CPUState *cpu); +void simulate_rdmsr(CPUX86State *env); +void simulate_wrmsr(CPUX86State *env); target_ulong read_reg(CPUX86State *env, int reg, int size); void write_reg(CPUX86State *env, int reg, target_ulong val, int size); diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index 56c72f3c45..9c791b7b05 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -37,6 +37,7 @@ static bool kvm_cpu_realizefn(CPUState *cs, Error **errp) * -> cpu_exec_realizefn(): * -> accel_cpu_common_realize() * kvm_cpu_realizefn() -> host_cpu_realizefn() + * -> cpu_common_realizefn() * -> check/update ucode_rev, phys_bits, mwait */ if (cpu->max_features) { diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 770e81d56e..11b8177eff 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -1837,6 +1837,10 @@ int kvm_arch_init_vcpu(CPUState *cs) c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; c->ebx = cs->cpu_index; } + + if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) { + c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR; + } } r = kvm_xen_init_vcpu(cs); diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c index 75b2c557b9..c0631f9cf4 100644 --- a/target/i386/kvm/xen-emu.c +++ b/target/i386/kvm/xen-emu.c @@ -28,6 +28,7 @@ #include "hw/i386/kvm/xen_overlay.h" #include "hw/i386/kvm/xen_evtchn.h" #include "hw/i386/kvm/xen_gnttab.h" +#include "hw/i386/kvm/xen_primary_console.h" #include "hw/i386/kvm/xen_xenstore.h" #include "hw/xen/interface/version.h" @@ -182,7 +183,8 @@ int kvm_xen_init(KVMState *s, uint32_t hypercall_msr) return ret; } - /* The page couldn't be overlaid until KVM was initialized */ + /* The pages couldn't be overlaid until KVM was initialized */ + xen_primary_console_reset(); xen_xenstore_reset(); return 0; @@ -812,11 +814,23 @@ static bool handle_get_param(struct kvm_xen_exit *exit, X86CPU *cpu, case HVM_PARAM_STORE_EVTCHN: hp.value = xen_xenstore_get_port(); break; + case HVM_PARAM_CONSOLE_PFN: + hp.value = xen_primary_console_get_pfn(); + if (!hp.value) { + err = -EINVAL; + } + break; + case 
HVM_PARAM_CONSOLE_EVTCHN: + hp.value = xen_primary_console_get_port(); + if (!hp.value) { + err = -EINVAL; + } + break; default: return false; } - if (kvm_copy_to_gva(cs, arg, &hp, sizeof(hp))) { + if (!err && kvm_copy_to_gva(cs, arg, &hp, sizeof(hp))) { err = -EFAULT; } out: @@ -1077,17 +1091,13 @@ static int vcpuop_stop_periodic_timer(CPUState *target) * Must always be called with xen_timers_lock held. */ static int do_set_singleshot_timer(CPUState *cs, uint64_t timeout_abs, - bool future, bool linux_wa) + bool linux_wa) { CPUX86State *env = &X86_CPU(cs)->env; int64_t now = kvm_get_current_ns(); int64_t qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); int64_t delta = timeout_abs - now; - if (future && timeout_abs < now) { - return -ETIME; - } - if (linux_wa && unlikely((int64_t)timeout_abs < 0 || (delta > 0 && (uint32_t)(delta >> 50) != 0))) { /* @@ -1129,9 +1139,13 @@ static int vcpuop_set_singleshot_timer(CPUState *cs, uint64_t arg) } QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock); - return do_set_singleshot_timer(cs, sst.timeout_abs_ns, - !!(sst.flags & VCPU_SSHOTTMR_future), - false); + + /* + * We ignore the VCPU_SSHOTTMR_future flag, just as Xen now does. + * The only guest that ever used it, got it wrong. + * https://xenbits.xen.org/gitweb/?p=xen.git;a=commitdiff;h=19c6cbd909 + */ + return do_set_singleshot_timer(cs, sst.timeout_abs_ns, false); } static int vcpuop_stop_singleshot_timer(CPUState *cs) @@ -1156,7 +1170,7 @@ static bool kvm_xen_hcall_set_timer_op(struct kvm_xen_exit *exit, X86CPU *cpu, err = vcpuop_stop_singleshot_timer(CPU(cpu)); } else { QEMU_LOCK_GUARD(&X86_CPU(cpu)->env.xen_timers_lock); - err = do_set_singleshot_timer(CPU(cpu), timeout, false, true); + err = do_set_singleshot_timer(CPU(cpu), timeout, true); } exit->u.hcall.result = err; return true; @@ -1427,6 +1441,11 @@ int kvm_xen_soft_reset(void) return err; } + err = xen_primary_console_reset(); + if (err) { + return err; + } + err = xen_xenstore_reset(); if (err) { return err; @@ -1844,7 +1863,7 @@ int kvm_put_xen_state(CPUState *cs) QEMU_LOCK_GUARD(&env->xen_timers_lock); if (env->xen_singleshot_timer_ns) { ret = do_set_singleshot_timer(cs, env->xen_singleshot_timer_ns, - false, false); + false); if (ret < 0) { return ret; } diff --git a/target/i386/monitor.c b/target/i386/monitor.c index 6512846327..950ff9ccbc 100644 --- a/target/i386/monitor.c +++ b/target/i386/monitor.c @@ -28,6 +28,7 @@ #include "monitor/hmp-target.h" #include "monitor/hmp.h" #include "qapi/qmp/qdict.h" +#include "sysemu/hw_accel.h" #include "sysemu/kvm.h" #include "qapi/error.h" #include "qapi/qapi-commands-misc-target.h" @@ -654,7 +655,11 @@ void hmp_info_local_apic(Monitor *mon, const QDict *qdict) if (qdict_haskey(qdict, "apic-id")) { int id = qdict_get_try_int(qdict, "apic-id", 0); + cs = cpu_by_arch_id(id); + if (cs) { + cpu_synchronize_state(cs); + } } else { cs = mon_get_cpu(mon); } diff --git a/target/loongarch/cpu-qom.h b/target/loongarch/cpu-qom.h new file mode 100644 index 0000000000..fa3fcf7186 --- /dev/null +++ b/target/loongarch/cpu-qom.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch CPU QOM header (target agnostic) + * + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_CPU_QOM_H +#define LOONGARCH_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define TYPE_LOONGARCH_CPU "loongarch-cpu" +#define TYPE_LOONGARCH32_CPU "loongarch32-cpu" +#define TYPE_LOONGARCH64_CPU "loongarch64-cpu" + +OBJECT_DECLARE_CPU_TYPE(LoongArchCPU, LoongArchCPUClass, + 
LOONGARCH_CPU) + +#define LOONGARCH_CPU_TYPE_SUFFIX "-" TYPE_LOONGARCH_CPU +#define LOONGARCH_CPU_TYPE_NAME(model) model LOONGARCH_CPU_TYPE_SUFFIX + +#endif diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index a60d07acd5..fc075952e6 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -721,8 +721,7 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) } } - if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU) - && !object_class_is_abstract(oc)) { + if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU)) { return oc; } return NULL; diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index 9d0f79f814..00d1fba597 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -17,6 +17,7 @@ #include "exec/memory.h" #endif #include "cpu-csr.h" +#include "cpu-qom.h" #define IOCSRF_TEMP 0 #define IOCSRF_NODECNT 1 @@ -371,9 +372,7 @@ typedef struct CPUArchState { * A LoongArch CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPULoongArchState env; QEMUTimer timer; @@ -383,13 +382,6 @@ struct ArchCPU { const char *dtb_compatible; }; -#define TYPE_LOONGARCH_CPU "loongarch-cpu" -#define TYPE_LOONGARCH32_CPU "loongarch32-cpu" -#define TYPE_LOONGARCH64_CPU "loongarch64-cpu" - -OBJECT_DECLARE_CPU_TYPE(LoongArchCPU, LoongArchCPUClass, - LOONGARCH_CPU) - /** * LoongArchCPUClass: * @parent_realize: The parent class' realize handler. @@ -398,9 +390,7 @@ OBJECT_DECLARE_CPU_TYPE(LoongArchCPU, LoongArchCPUClass, * A LoongArch CPU model. */ struct LoongArchCPUClass { - /*< private >*/ CPUClass parent_class; - /*< public >*/ DeviceRealize parent_realize; ResettablePhases parent_phases; @@ -482,8 +472,6 @@ void loongarch_cpu_list(void); #include "exec/cpu-all.h" -#define LOONGARCH_CPU_TYPE_SUFFIX "-" TYPE_LOONGARCH_CPU -#define LOONGARCH_CPU_TYPE_NAME(model) model LOONGARCH_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU void loongarch_cpu_post_init(Object *obj); diff --git a/target/m68k/cpu-qom.h b/target/m68k/cpu-qom.h index 0ec7750a92..273e8eae41 100644 --- a/target/m68k/cpu-qom.h +++ b/target/m68k/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU Motorola 68k CPU + * QEMU Motorola 68k CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,27 +21,12 @@ #define QEMU_M68K_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_M68K_CPU "m68k-cpu" OBJECT_DECLARE_CPU_TYPE(M68kCPU, M68kCPUClass, M68K_CPU) -/* - * M68kCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A Motorola 68k CPU model. 
- */ -struct M68kCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; - +#define M68K_CPU_TYPE_SUFFIX "-" TYPE_M68K_CPU +#define M68K_CPU_TYPE_NAME(model) model M68K_CPU_TYPE_SUFFIX #endif diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 538d9473c2..11c7e0a790 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -111,8 +111,7 @@ static ObjectClass *m68k_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(M68K_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && (object_class_dynamic_cast(oc, TYPE_M68K_CPU) == NULL || - object_class_is_abstract(oc))) { + if (oc != NULL && object_class_dynamic_cast(oc, TYPE_M68K_CPU) == NULL) { return NULL; } return oc; diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index 20afb0c94d..6cfc696d2b 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -164,13 +164,24 @@ typedef struct CPUArchState { * A Motorola 68k CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUM68KState env; }; +/* + * M68kCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A Motorola 68k CPU model. + */ +struct M68kCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; #ifndef CONFIG_USER_ONLY void m68k_cpu_do_interrupt(CPUState *cpu); @@ -563,8 +574,6 @@ enum { ACCESS_DATA = 0x20, /* Data load/store access */ }; -#define M68K_CPU_TYPE_SUFFIX "-" TYPE_M68K_CPU -#define M68K_CPU_TYPE_NAME(model) model M68K_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_M68K_CPU #define cpu_list m68k_cpu_list diff --git a/target/microblaze/cpu-qom.h b/target/microblaze/cpu-qom.h index cda9220fa9..92e539fb2f 100644 --- a/target/microblaze/cpu-qom.h +++ b/target/microblaze/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU MicroBlaze CPU + * QEMU MicroBlaze CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,27 +21,9 @@ #define QEMU_MICROBLAZE_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_MICROBLAZE_CPU "microblaze-cpu" OBJECT_DECLARE_CPU_TYPE(MicroBlazeCPU, MicroBlazeCPUClass, MICROBLAZE_CPU) -/** - * MicroBlazeCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A MicroBlaze CPU model. - */ -struct MicroBlazeCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; - - #endif diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index e43c49d4af..b5374365f5 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -343,9 +343,7 @@ typedef struct { * A MicroBlaze CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUMBState env; @@ -357,6 +355,19 @@ struct ArchCPU { MicroBlazeCPUConfig cfg; }; +/** + * MicroBlazeCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A MicroBlaze CPU model. 
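The same header split now repeats target by target through the rest of the series: cpu-qom.h keeps only the target-agnostic pieces (the TYPE_* name, OBJECT_DECLARE_CPU_TYPE(), and the *_CPU_TYPE_SUFFIX/_NAME macros), while the ArchCPU and *CPUClass struct definitions move into cpu.h next to CPUArchState, losing their /*< private >*/ markers on the way. A minimal layout sketch for a hypothetical "foo" target (CPUFOOState and the names are illustrative, not any real target; this presupposes QEMU's headers):

/* cpu-qom.h -- target agnostic, safe for generic code to include */
#include "hw/core/cpu.h"

#define TYPE_FOO_CPU "foo-cpu"
OBJECT_DECLARE_CPU_TYPE(FooCPU, FooCPUClass, FOO_CPU)

#define FOO_CPU_TYPE_SUFFIX "-" TYPE_FOO_CPU
#define FOO_CPU_TYPE_NAME(model) model FOO_CPU_TYPE_SUFFIX

/* cpu.h -- target specific; defines what the macros above declared */
struct ArchCPU {
    CPUState parent_obj;
    CPUFOOState env;
};

struct FooCPUClass {
    CPUClass parent_class;
    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
};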
+ */ +struct MicroBlazeCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; #ifndef CONFIG_USER_ONLY void mb_cpu_do_interrupt(CPUState *cs); diff --git a/target/mips/cpu-qom.h b/target/mips/cpu-qom.h index 0dffab453b..0eea2a2598 100644 --- a/target/mips/cpu-qom.h +++ b/target/mips/cpu-qom.h @@ -21,7 +21,6 @@ #define QEMU_MIPS_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #ifdef TARGET_MIPS64 #define TYPE_MIPS_CPU "mips64-cpu" @@ -31,25 +30,7 @@ OBJECT_DECLARE_CPU_TYPE(MIPSCPU, MIPSCPUClass, MIPS_CPU) -/** - * MIPSCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A MIPS CPU model. - */ -struct MIPSCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; - const struct mips_def_t *cpu_def; - - /* Used for the jazz board to modify mips_cpu_do_transaction_failed. */ - bool no_data_aborts; -}; - +#define MIPS_CPU_TYPE_SUFFIX "-" TYPE_MIPS_CPU +#define MIPS_CPU_TYPE_NAME(model) model MIPS_CPU_TYPE_SUFFIX #endif diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 5fddceff3a..52f13f0363 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -1209,9 +1209,7 @@ typedef struct CPUArchState { * A MIPS CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUMIPSState env; @@ -1219,6 +1217,23 @@ struct ArchCPU { Clock *count_div; /* Divider for CP0_Count clock */ }; +/** + * MIPSCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A MIPS CPU model. + */ +struct MIPSCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; + const struct mips_def_t *cpu_def; + + /* Used for the jazz board to modify mips_cpu_do_transaction_failed. */ + bool no_data_aborts; +}; void mips_cpu_list(void); @@ -1303,8 +1318,6 @@ enum { */ #define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0 -#define MIPS_CPU_TYPE_SUFFIX "-" TYPE_MIPS_CPU -#define MIPS_CPU_TYPE_NAME(model) model MIPS_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_MIPS_CPU bool cpu_type_supports_cps_smp(const char *cpu_type); diff --git a/target/mips/tcg/msa.decode b/target/mips/tcg/msa.decode index 9575289195..4410e2a02e 100644 --- a/target/mips/tcg/msa.decode +++ b/target/mips/tcg/msa.decode @@ -31,8 +31,8 @@ @lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r @ldst ...... sa:s10 ws:5 wd:5 .... df:2 &msa_i -@bz_v ...... ... .. wt:5 sa:16 &msa_bz df=3 -@bz ...... ... df:2 wt:5 sa:16 &msa_bz +@bz_v ...... ... .. wt:5 sa:s16 &msa_bz df=3 +@bz ...... ... df:2 wt:5 sa:s16 &msa_bz @elm_df ...... .... ...... ws:5 wd:5 ...... &msa_elm_df df=%elm_df n=%elm_n @elm ...... .......... ws:5 wd:5 ...... &msa_elm @vec ...... ..... wt:5 ws:5 wd:5 ...... &msa_r df=0 diff --git a/target/mips/tcg/tx79.decode b/target/mips/tcg/tx79.decode index 57d87a2076..578b8c54c0 100644 --- a/target/mips/tcg/tx79.decode +++ b/target/mips/tcg/tx79.decode @@ -24,7 +24,7 @@ @rs ...... rs:5 ..... .......... ...... &r sa=0 rt=0 rd=0 @rd ...... .......... rd:5 ..... ...... &r sa=0 rs=0 rt=0 -@ldst ...... base:5 rt:5 offset:16 &i +@ldst ...... 
base:5 rt:5 offset:s16 &i ########################################################################### diff --git a/target/nios2/cpu-qom.h b/target/nios2/cpu-qom.h new file mode 100644 index 0000000000..2fd9121540 --- /dev/null +++ b/target/nios2/cpu-qom.h @@ -0,0 +1,18 @@ +/* + * QEMU Nios II CPU QOM header (target agnostic) + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef QEMU_NIOS2_CPU_QOM_H +#define QEMU_NIOS2_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define TYPE_NIOS2_CPU "nios2-cpu" + +OBJECT_DECLARE_CPU_TYPE(Nios2CPU, Nios2CPUClass, NIOS2_CPU) + +#endif diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c index 15e499f828..a27732bf2b 100644 --- a/target/nios2/cpu.c +++ b/target/nios2/cpu.c @@ -199,14 +199,6 @@ static void nios2_cpu_realizefn(DeviceState *dev, Error **errp) Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(dev); Error *local_err = NULL; -#ifndef CONFIG_USER_ONLY - if (cpu->eic_present) { - qdev_init_gpio_in_named(DEVICE(cpu), eic_set_irq, "EIC", 1); - } else { - qdev_init_gpio_in_named(DEVICE(cpu), iic_set_irq, "IRQ", 32); - } -#endif - cpu_exec_realizefn(cs, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); @@ -220,6 +212,14 @@ static void nios2_cpu_realizefn(DeviceState *dev, Error **errp) /* We have reserved storage for cpuid; might as well use it. */ cpu->env.ctrl[CR_CPUID] = cs->cpu_index; +#ifndef CONFIG_USER_ONLY + if (cpu->eic_present) { + qdev_init_gpio_in_named(DEVICE(cpu), eic_set_irq, "EIC", 1); + } else { + qdev_init_gpio_in_named(DEVICE(cpu), iic_set_irq, "IRQ", 32); + } +#endif + ncc->parent_realize(dev, errp); } diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h index 70b6377a4f..2d79b5b298 100644 --- a/target/nios2/cpu.h +++ b/target/nios2/cpu.h @@ -21,20 +21,15 @@ #ifndef NIOS2_CPU_H #define NIOS2_CPU_H +#include "cpu-qom.h" #include "exec/cpu-defs.h" -#include "hw/core/cpu.h" #include "hw/registerfields.h" -#include "qom/object.h" typedef struct CPUArchState CPUNios2State; #if !defined(CONFIG_USER_ONLY) #include "mmu.h" #endif -#define TYPE_NIOS2_CPU "nios2-cpu" - -OBJECT_DECLARE_CPU_TYPE(Nios2CPU, Nios2CPUClass, NIOS2_CPU) - /** * Nios2CPUClass: * @parent_phases: The parent class' reset phase handlers. @@ -42,9 +37,7 @@ OBJECT_DECLARE_CPU_TYPE(Nios2CPU, Nios2CPUClass, NIOS2_CPU) * A Nios2 CPU model. */ struct Nios2CPUClass { - /*< private >*/ CPUClass parent_class; - /*< public >*/ DeviceRealize parent_realize; ResettablePhases parent_phases; @@ -214,9 +207,7 @@ typedef struct { * A Nios2 CPU. 
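A few hunks back, the msa.decode and tx79.decode formats changed immediates from sa:16 and offset:16 to sa:s16 and offset:s16. In decodetree, a width prefixed with 's' makes the extracted field sign-extended, which these fields need: @bz's sa is a branch displacement and @ldst's offset is a load/store displacement, both of which can be negative. A self-contained C toy of the difference (not QEMU's generated extractor code):

#include <assert.h>
#include <stdint.h>

static int32_t extract_u16(uint32_t insn)   /* what ":16" extracts */
{
    return insn & 0xffff;
}

static int32_t extract_s16(uint32_t insn)   /* what ":s16" extracts */
{
    return (int16_t)(insn & 0xffff);
}

int main(void)
{
    uint32_t insn = 0x0000ffff;          /* displacement field = 0xffff */
    assert(extract_u16(insn) == 65535);  /* unsigned: huge forward offset */
    assert(extract_s16(insn) == -1);     /* signed: the intended -1 */
    return 0;
}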
*/ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUNios2State env; diff --git a/target/openrisc/cpu-qom.h b/target/openrisc/cpu-qom.h new file mode 100644 index 0000000000..14bac33312 --- /dev/null +++ b/target/openrisc/cpu-qom.h @@ -0,0 +1,21 @@ +/* + * QEMU OpenRISC CPU QOM header (target agnostic) + * + * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com> + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef QEMU_OPENRISC_CPU_QOM_H +#define QEMU_OPENRISC_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define TYPE_OPENRISC_CPU "or1k-cpu" + +OBJECT_DECLARE_CPU_TYPE(OpenRISCCPU, OpenRISCCPUClass, OPENRISC_CPU) + +#define OPENRISC_CPU_TYPE_SUFFIX "-" TYPE_OPENRISC_CPU +#define OPENRISC_CPU_TYPE_NAME(model) model OPENRISC_CPU_TYPE_SUFFIX + +#endif diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index f5a3d5273b..1173260017 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -164,8 +164,7 @@ static ObjectClass *openrisc_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(OPENRISC_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_OPENRISC_CPU) || - object_class_is_abstract(oc))) { + if (oc != NULL && !object_class_dynamic_cast(oc, TYPE_OPENRISC_CPU)) { return NULL; } return oc; diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index 334997e9a1..dedeb89f8e 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -20,17 +20,12 @@ #ifndef OPENRISC_CPU_H #define OPENRISC_CPU_H +#include "cpu-qom.h" #include "exec/cpu-defs.h" #include "fpu/softfloat-types.h" -#include "hw/core/cpu.h" -#include "qom/object.h" #define TCG_GUEST_DEFAULT_MO (0) -#define TYPE_OPENRISC_CPU "or1k-cpu" - -OBJECT_DECLARE_CPU_TYPE(OpenRISCCPU, OpenRISCCPUClass, OPENRISC_CPU) - /** * OpenRISCCPUClass: * @parent_realize: The parent class' realize handler. @@ -39,9 +34,7 @@ OBJECT_DECLARE_CPU_TYPE(OpenRISCCPU, OpenRISCCPUClass, OPENRISC_CPU) * A OpenRISC CPU model. */ struct OpenRISCCPUClass { - /*< private >*/ CPUClass parent_class; - /*< public >*/ DeviceRealize parent_realize; ResettablePhases parent_phases; @@ -301,14 +294,11 @@ typedef struct CPUArchState { * A OpenRISC CPU. 
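Another recurring cleanup: the per-target *_cpu_class_by_name() resolvers (loongarch and m68k above, openrisc here, riscv, rx, sh4 and tricore below) drop their object_class_is_abstract() checks, keeping only the dynamic-cast test. A sketch of the shape they converge on, again for a hypothetical "foo" target; the abstract-class filtering is presumably done once in the common CPU class lookup rather than repeated per target:

static ObjectClass *foo_cpu_class_by_name(const char *cpu_model)
{
    char *typename = g_strdup_printf(FOO_CPU_TYPE_NAME("%s"), cpu_model);
    ObjectClass *oc = object_class_by_name(typename);

    g_free(typename);
    /* only the type check remains; no object_class_is_abstract() here */
    if (oc && !object_class_dynamic_cast(oc, TYPE_FOO_CPU)) {
        return NULL;
    }
    return oc;
}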
*/ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUOpenRISCState env; }; - void cpu_openrisc_list(void); void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); @@ -343,8 +333,6 @@ void cpu_openrisc_count_start(OpenRISCCPU *cpu); void cpu_openrisc_count_stop(OpenRISCCPU *cpu); #endif -#define OPENRISC_CPU_TYPE_SUFFIX "-" TYPE_OPENRISC_CPU -#define OPENRISC_CPU_TYPE_NAME(model) model OPENRISC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_OPENRISC_CPU #include "exec/cpu-all.h" diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index be33786bd8..0241609efe 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU PowerPC CPU + * QEMU PowerPC CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,7 +21,6 @@ #define QEMU_PPC_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #ifdef TARGET_PPC64 #define TYPE_POWERPC_CPU "powerpc64-cpu" @@ -33,170 +32,9 @@ OBJECT_DECLARE_CPU_TYPE(PowerPCCPU, PowerPCCPUClass, POWERPC_CPU) #define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU #define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX -#define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU #define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host") -ObjectClass *ppc_cpu_class_by_name(const char *name); - -typedef struct CPUArchState CPUPPCState; -typedef struct ppc_tb_t ppc_tb_t; -typedef struct ppc_dcr_t ppc_dcr_t; - -/*****************************************************************************/ -/* MMU model */ -typedef enum powerpc_mmu_t powerpc_mmu_t; -enum powerpc_mmu_t { - POWERPC_MMU_UNKNOWN = 0x00000000, - /* Standard 32 bits PowerPC MMU */ - POWERPC_MMU_32B = 0x00000001, - /* PowerPC 6xx MMU with software TLB */ - POWERPC_MMU_SOFT_6xx = 0x00000002, - /* - * PowerPC 74xx MMU with software TLB (this has been - * disabled, see git history for more information. 
- * keywords: tlbld tlbli TLBMISS PTEHI PTELO) - */ - POWERPC_MMU_SOFT_74xx = 0x00000003, - /* PowerPC 4xx MMU with software TLB */ - POWERPC_MMU_SOFT_4xx = 0x00000004, - /* PowerPC MMU in real mode only */ - POWERPC_MMU_REAL = 0x00000006, - /* Freescale MPC8xx MMU model */ - POWERPC_MMU_MPC8xx = 0x00000007, - /* BookE MMU model */ - POWERPC_MMU_BOOKE = 0x00000008, - /* BookE 2.06 MMU model */ - POWERPC_MMU_BOOKE206 = 0x00000009, -#define POWERPC_MMU_64 0x00010000 - /* 64 bits PowerPC MMU */ - POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001, - /* Architecture 2.03 and later (has LPCR) */ - POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002, - /* Architecture 2.06 variant */ - POWERPC_MMU_2_06 = POWERPC_MMU_64 | 0x00000003, - /* Architecture 2.07 variant */ - POWERPC_MMU_2_07 = POWERPC_MMU_64 | 0x00000004, - /* Architecture 3.00 variant */ - POWERPC_MMU_3_00 = POWERPC_MMU_64 | 0x00000005, -}; - -static inline bool mmu_is_64bit(powerpc_mmu_t mmu_model) -{ - return mmu_model & POWERPC_MMU_64; -} - -/*****************************************************************************/ -/* Exception model */ -typedef enum powerpc_excp_t powerpc_excp_t; -enum powerpc_excp_t { - POWERPC_EXCP_UNKNOWN = 0, - /* Standard PowerPC exception model */ - POWERPC_EXCP_STD, - /* PowerPC 40x exception model */ - POWERPC_EXCP_40x, - /* PowerPC 603/604/G2 exception model */ - POWERPC_EXCP_6xx, - /* PowerPC 7xx exception model */ - POWERPC_EXCP_7xx, - /* PowerPC 74xx exception model */ - POWERPC_EXCP_74xx, - /* BookE exception model */ - POWERPC_EXCP_BOOKE, - /* PowerPC 970 exception model */ - POWERPC_EXCP_970, - /* POWER7 exception model */ - POWERPC_EXCP_POWER7, - /* POWER8 exception model */ - POWERPC_EXCP_POWER8, - /* POWER9 exception model */ - POWERPC_EXCP_POWER9, - /* POWER10 exception model */ - POWERPC_EXCP_POWER10, -}; - -/*****************************************************************************/ -/* PM instructions */ -typedef enum { - PPC_PM_DOZE, - PPC_PM_NAP, - PPC_PM_SLEEP, - PPC_PM_RVWINKLE, - PPC_PM_STOP, -} powerpc_pm_insn_t; - -/*****************************************************************************/ -/* Input pins model */ -typedef enum powerpc_input_t powerpc_input_t; -enum powerpc_input_t { - PPC_FLAGS_INPUT_UNKNOWN = 0, - /* PowerPC 6xx bus */ - PPC_FLAGS_INPUT_6xx, - /* BookE bus */ - PPC_FLAGS_INPUT_BookE, - /* PowerPC 405 bus */ - PPC_FLAGS_INPUT_405, - /* PowerPC 970 bus */ - PPC_FLAGS_INPUT_970, - /* PowerPC POWER7 bus */ - PPC_FLAGS_INPUT_POWER7, - /* PowerPC POWER9 bus */ - PPC_FLAGS_INPUT_POWER9, - /* Freescale RCPU bus */ - PPC_FLAGS_INPUT_RCPU, -}; - -typedef struct PPCHash64Options PPCHash64Options; - -/** - * PowerPCCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A PowerPC CPU model. - */ -struct PowerPCCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - DeviceUnrealize parent_unrealize; - ResettablePhases parent_phases; - void (*parent_parse_features)(const char *type, char *str, Error **errp); - - uint32_t pvr; - /* - * If @best is false, match if pcc is in the family of pvr - * Else match only if pcc is the best match for pvr in this family. 
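The powerpc_mmu_t values being deleted here (and re-added in cpu.h below) encode every 64-bit MMU model as POWERPC_MMU_64 (0x00010000) OR-ed with a small index, which is why mmu_is_64bit() is a single mask test instead of a chain of equality comparisons. A standalone illustration using the constants from the hunk:

#include <assert.h>

#define POWERPC_MMU_64       0x00010000
#define POWERPC_MMU_SOFT_4xx 0x00000004
#define POWERPC_MMU_2_07     (POWERPC_MMU_64 | 0x00000004)

int main(void)
{
    /* the same test mmu_is_64bit() performs: flag bit, not equality */
    assert(POWERPC_MMU_2_07 & POWERPC_MMU_64);
    assert(!(POWERPC_MMU_SOFT_4xx & POWERPC_MMU_64));
    return 0;
}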
- */ - bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr, bool best); - uint64_t pcr_mask; /* Available bits in PCR register */ - uint64_t pcr_supported; /* Bits for supported PowerISA versions */ - uint32_t svr; - uint64_t insns_flags; - uint64_t insns_flags2; - uint64_t msr_mask; - uint64_t lpcr_mask; /* Available bits in the LPCR */ - uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */ - powerpc_mmu_t mmu_model; - powerpc_excp_t excp_model; - powerpc_input_t bus_model; - uint32_t flags; - int bfd_mach; - uint32_t l1_dcache_size, l1_icache_size; -#ifndef CONFIG_USER_ONLY - unsigned int gdb_num_sprs; - const char *gdb_spr_xml; -#endif - const PPCHash64Options *hash64_opts; - struct ppc_radix_page_info *radix_page_info; - uint32_t lrg_decr_bits; - int n_host_threads; - void (*init_proc)(CPUPPCState *env); - int (*check_pow)(CPUPPCState *env); -}; - #ifndef CONFIG_USER_ONLY typedef struct PPCTimebase { uint64_t guest_timebase; diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 30392ebeee..f8101ffa29 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -27,6 +27,8 @@ #include "qom/object.h" #include "hw/registerfields.h" +#define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU + #define TCG_GUEST_DEFAULT_MO 0 #define TARGET_PAGE_BITS_64K 16 @@ -190,6 +192,95 @@ enum { POWERPC_EXCP_TRAP = 0x40, }; +/* Exception model */ +typedef enum powerpc_excp_t { + POWERPC_EXCP_UNKNOWN = 0, + /* Standard PowerPC exception model */ + POWERPC_EXCP_STD, + /* PowerPC 40x exception model */ + POWERPC_EXCP_40x, + /* PowerPC 603/604/G2 exception model */ + POWERPC_EXCP_6xx, + /* PowerPC 7xx exception model */ + POWERPC_EXCP_7xx, + /* PowerPC 74xx exception model */ + POWERPC_EXCP_74xx, + /* BookE exception model */ + POWERPC_EXCP_BOOKE, + /* PowerPC 970 exception model */ + POWERPC_EXCP_970, + /* POWER7 exception model */ + POWERPC_EXCP_POWER7, + /* POWER8 exception model */ + POWERPC_EXCP_POWER8, + /* POWER9 exception model */ + POWERPC_EXCP_POWER9, + /* POWER10 exception model */ + POWERPC_EXCP_POWER10, +} powerpc_excp_t; + +/*****************************************************************************/ +/* MMU model */ +typedef enum powerpc_mmu_t { + POWERPC_MMU_UNKNOWN = 0x00000000, + /* Standard 32 bits PowerPC MMU */ + POWERPC_MMU_32B = 0x00000001, + /* PowerPC 6xx MMU with software TLB */ + POWERPC_MMU_SOFT_6xx = 0x00000002, + /* + * PowerPC 74xx MMU with software TLB (this has been + * disabled, see git history for more information. 
+ * keywords: tlbld tlbli TLBMISS PTEHI PTELO) + */ + POWERPC_MMU_SOFT_74xx = 0x00000003, + /* PowerPC 4xx MMU with software TLB */ + POWERPC_MMU_SOFT_4xx = 0x00000004, + /* PowerPC MMU in real mode only */ + POWERPC_MMU_REAL = 0x00000006, + /* Freescale MPC8xx MMU model */ + POWERPC_MMU_MPC8xx = 0x00000007, + /* BookE MMU model */ + POWERPC_MMU_BOOKE = 0x00000008, + /* BookE 2.06 MMU model */ + POWERPC_MMU_BOOKE206 = 0x00000009, +#define POWERPC_MMU_64 0x00010000 + /* 64 bits PowerPC MMU */ + POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001, + /* Architecture 2.03 and later (has LPCR) */ + POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002, + /* Architecture 2.06 variant */ + POWERPC_MMU_2_06 = POWERPC_MMU_64 | 0x00000003, + /* Architecture 2.07 variant */ + POWERPC_MMU_2_07 = POWERPC_MMU_64 | 0x00000004, + /* Architecture 3.00 variant */ + POWERPC_MMU_3_00 = POWERPC_MMU_64 | 0x00000005, +} powerpc_mmu_t; + +static inline bool mmu_is_64bit(powerpc_mmu_t mmu_model) +{ + return mmu_model & POWERPC_MMU_64; +} + +/*****************************************************************************/ +/* Input pins model */ +typedef enum powerpc_input_t { + PPC_FLAGS_INPUT_UNKNOWN = 0, + /* PowerPC 6xx bus */ + PPC_FLAGS_INPUT_6xx, + /* BookE bus */ + PPC_FLAGS_INPUT_BookE, + /* PowerPC 405 bus */ + PPC_FLAGS_INPUT_405, + /* PowerPC 970 bus */ + PPC_FLAGS_INPUT_970, + /* PowerPC POWER7 bus */ + PPC_FLAGS_INPUT_POWER7, + /* PowerPC POWER9 bus */ + PPC_FLAGS_INPUT_POWER9, + /* Freescale RCPU bus */ + PPC_FLAGS_INPUT_RCPU, +} powerpc_input_t; + #define PPC_INPUT(env) ((env)->bus_model) /*****************************************************************************/ @@ -198,9 +289,14 @@ typedef struct opc_handler_t opc_handler_t; /*****************************************************************************/ /* Types used to describe some PowerPC registers etc. */ typedef struct DisasContext DisasContext; +typedef struct ppc_dcr_t ppc_dcr_t; typedef struct ppc_spr_t ppc_spr_t; +typedef struct ppc_tb_t ppc_tb_t; typedef union ppc_tlb_t ppc_tlb_t; typedef struct ppc_hash_pte64 ppc_hash_pte64_t; +typedef struct PPCHash64Options PPCHash64Options; + +typedef struct CPUArchState CPUPPCState; /* SPR access micro-ops generations callbacks */ struct ppc_spr_t { @@ -1313,9 +1409,7 @@ typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass; * A PowerPC CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUPPCState env; @@ -1341,7 +1435,54 @@ struct ArchCPU { int32_t mig_slb_nr; }; +/** + * PowerPCCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A PowerPC CPU model. + */ +struct PowerPCCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; + ResettablePhases parent_phases; + void (*parent_parse_features)(const char *type, char *str, Error **errp); + + uint32_t pvr; + /* + * If @best is false, match if pcc is in the family of pvr + * Else match only if pcc is the best match for pvr in this family. 
+ */ + bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr, bool best); + uint64_t pcr_mask; /* Available bits in PCR register */ + uint64_t pcr_supported; /* Bits for supported PowerISA versions */ + uint32_t svr; + uint64_t insns_flags; + uint64_t insns_flags2; + uint64_t msr_mask; + uint64_t lpcr_mask; /* Available bits in the LPCR */ + uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */ + powerpc_mmu_t mmu_model; + powerpc_excp_t excp_model; + powerpc_input_t bus_model; + uint32_t flags; + int bfd_mach; + uint32_t l1_dcache_size, l1_icache_size; +#ifndef CONFIG_USER_ONLY + unsigned int gdb_num_sprs; + const char *gdb_spr_xml; +#endif + const PPCHash64Options *hash64_opts; + struct ppc_radix_page_info *radix_page_info; + uint32_t lrg_decr_bits; + int n_host_threads; + void (*init_proc)(CPUPPCState *env); + int (*check_pow)(CPUPPCState *env); +}; +ObjectClass *ppc_cpu_class_by_name(const char *name); PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr); PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr); PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc); diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 7926114d5c..a42743a3e0 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -3136,7 +3136,7 @@ void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) { CPUState *cs = env_cpu(env); - PowerPCCPU *cpu = POWERPC_CPU(cs); + PowerPCCPU *cpu = env_archcpu(env); CPUState *ccs; uint32_t nr_threads = cs->nr_threads; int ttir = rb & PPC_BITMASK(57, 63); diff --git a/target/ppc/internal.h b/target/ppc/internal.h index c881c67a8b..5b20ecbd33 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -20,6 +20,15 @@ #include "hw/registerfields.h" +/* PM instructions */ +typedef enum { + PPC_PM_DOZE, + PPC_PM_NAP, + PPC_PM_SLEEP, + PPC_PM_RVWINKLE, + PPC_PM_STOP, +} powerpc_pm_insn_t; + #define FUNC_MASK(name, ret_type, size, max_val) \ static inline ret_type name(uint##size##_t start, \ uint##size##_t end) \ diff --git a/target/ppc/kvm-stub.c b/target/ppc/kvm-stub.c deleted file mode 100644 index b98e1d404f..0000000000 --- a/target/ppc/kvm-stub.c +++ /dev/null @@ -1,19 +0,0 @@ -/* - * QEMU KVM PPC specific function stubs - * - * Copyright Freescale Inc. 2013 - * - * Author: Alexander Graf <agraf@suse.de> - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- * - */ -#include "qemu/osdep.h" -#include "cpu.h" -#include "hw/ppc/openpic_kvm.h" - -int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs) -{ - return -EINVAL; -} diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index d0e2dcdc77..9b1abe2fc4 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -268,7 +268,7 @@ static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp) "KVM failed to provide the MMU features it supports"); } -struct ppc_radix_page_info *kvm_get_radix_page_info(void) +static struct ppc_radix_page_info *kvmppc_get_radix_page_info(void) { KVMState *s = KVM_STATE(current_accel()); struct ppc_radix_page_info *radix_page_info; @@ -2368,7 +2368,7 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data) } #if defined(TARGET_PPC64) - pcc->radix_page_info = kvm_get_radix_page_info(); + pcc->radix_page_info = kvmppc_get_radix_page_info(); if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) { /* diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h index 6a4dd9c560..1975fb5ee6 100644 --- a/target/ppc/kvm_ppc.h +++ b/target/ppc/kvm_ppc.h @@ -13,6 +13,10 @@ #include "exec/hwaddr.h" #include "cpu.h" +#ifdef CONFIG_USER_ONLY +#error Cannot include kvm_ppc.h from user emulation +#endif + #ifdef CONFIG_KVM uint32_t kvmppc_get_tbfreq(void); diff --git a/target/ppc/meson.build b/target/ppc/meson.build index 97ceb6e7c0..0b89f9b89f 100644 --- a/target/ppc/meson.build +++ b/target/ppc/meson.build @@ -30,7 +30,6 @@ gen = [ ] ppc_ss.add(when: 'CONFIG_TCG', if_true: gen) -ppc_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c')) ppc_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user_only_helper.c')) ppc_system_ss = ss.source_set() @@ -46,6 +45,7 @@ ppc_system_ss.add(when: 'CONFIG_TCG', if_true: files( ), if_false: files( 'tcg-stub.c', )) +ppc_system_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) ppc_system_ss.add(when: 'TARGET_PPC64', if_true: files( 'compat.c', diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h index f3fbe37a2c..91b3361dec 100644 --- a/target/riscv/cpu-qom.h +++ b/target/riscv/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU RISC-V CPU QOM header + * QEMU RISC-V CPU QOM header (target agnostic) * * Copyright (c) 2023 Ventana Micro Systems Inc. * @@ -20,14 +20,12 @@ #define RISCV_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_RISCV_CPU "riscv-cpu" #define TYPE_RISCV_DYNAMIC_CPU "riscv-dynamic-cpu" #define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU #define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) -#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU #define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any") #define TYPE_RISCV_CPU_MAX RISCV_CPU_TYPE_NAME("max") @@ -45,28 +43,6 @@ #define TYPE_RISCV_CPU_VEYRON_V1 RISCV_CPU_TYPE_NAME("veyron-v1") #define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host") -#if defined(TARGET_RISCV32) -# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32 -#elif defined(TARGET_RISCV64) -# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64 -#endif - -typedef struct CPUArchState CPURISCVState; - OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU) -/** - * RISCVCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A RISCV CPU model. 
- */ -struct RISCVCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; #endif /* RISCV_CPU_QOM_H */ diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 02db2760d1..83c7c0cf07 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -646,8 +646,7 @@ static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) || - object_class_is_abstract(oc)) { + if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU)) { return NULL; } return oc; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 8efc4d83ec..bf58b0f0b5 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -32,6 +32,16 @@ #include "qapi/qapi-types-common.h" #include "cpu-qom.h" +typedef struct CPUArchState CPURISCVState; + +#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU + +#if defined(TARGET_RISCV32) +# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32 +#elif defined(TARGET_RISCV64) +# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64 +#endif + #define TCG_GUEST_DEFAULT_MO 0 /* @@ -411,9 +421,7 @@ struct CPUArchState { * A RISCV CPU. */ struct ArchCPU { - /* < private > */ CPUState parent_obj; - /* < public > */ CPURISCVState env; @@ -430,6 +438,20 @@ struct ArchCPU { GHashTable *pmu_event_ctr_map; }; +/** + * RISCVCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A RISCV CPU model. + */ +struct RISCVCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; + static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) { return (env->misa_ext & ext) != 0; diff --git a/target/riscv/internals.h b/target/riscv/internals.h index b5f823c7ec..8239ae83cc 100644 --- a/target/riscv/internals.h +++ b/target/riscv/internals.h @@ -87,7 +87,7 @@ enum { static inline uint64_t nanbox_s(CPURISCVState *env, float32 f) { /* the value is sign-extended instead of NaN-boxing for zfinx */ - if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + if (env_archcpu(env)->cfg.ext_zfinx) { return (int32_t)f; } else { return f | MAKE_64BIT_MASK(32, 32); @@ -97,7 +97,7 @@ static inline uint64_t nanbox_s(CPURISCVState *env, float32 f) static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f) { /* Disable NaN-boxing check when enable zfinx */ - if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + if (env_archcpu(env)->cfg.ext_zfinx) { return (uint32_t)f; } @@ -113,7 +113,7 @@ static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f) static inline uint64_t nanbox_h(CPURISCVState *env, float16 f) { /* the value is sign-extended instead of NaN-boxing for zfinx */ - if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + if (env_archcpu(env)->cfg.ext_zfinx) { return (int16_t)f; } else { return f | MAKE_64BIT_MASK(16, 48); @@ -123,7 +123,7 @@ static inline uint64_t nanbox_h(CPURISCVState *env, float16 f) static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f) { /* Disable nanbox check when enable zfinx */ - if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + if (env_archcpu(env)->cfg.ext_zfinx) { return (uint16_t)f; } diff --git a/target/rx/cpu-qom.h b/target/rx/cpu-qom.h index 1c8466a187..ac2e5785ef 100644 --- a/target/rx/cpu-qom.h +++ b/target/rx/cpu-qom.h @@ -1,5 +1,5 @@ /* - * RX CPU + * QEMU RX CPU QOM header (target agnostic) * * Copyright (c) 2019 Yoshinori Sato * @@ -20,7 
+20,6 @@ #define RX_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_RX_CPU "rx-cpu" @@ -28,20 +27,7 @@ OBJECT_DECLARE_CPU_TYPE(RXCPU, RXCPUClass, RX_CPU) -/* - * RXCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A RX CPU model. - */ -struct RXCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; +#define RX_CPU_TYPE_SUFFIX "-" TYPE_RX_CPU +#define RX_CPU_TYPE_NAME(model) model RX_CPU_TYPE_SUFFIX #endif diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 4d0d3a0c8c..9cc9d9d15e 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -111,16 +111,12 @@ static ObjectClass *rx_cpu_class_by_name(const char *cpu_model) char *typename; oc = object_class_by_name(cpu_model); - if (oc != NULL && object_class_dynamic_cast(oc, TYPE_RX_CPU) != NULL && - !object_class_is_abstract(oc)) { + if (oc != NULL && object_class_dynamic_cast(oc, TYPE_RX_CPU) != NULL) { return oc; } typename = g_strdup_printf(RX_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && object_class_is_abstract(oc)) { - oc = NULL; - } return oc; } diff --git a/target/rx/cpu.h b/target/rx/cpu.h index f66754eb8a..e931e77e85 100644 --- a/target/rx/cpu.h +++ b/target/rx/cpu.h @@ -107,15 +107,25 @@ typedef struct CPUArchState { * A RX CPU */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPURXState env; }; -#define RX_CPU_TYPE_SUFFIX "-" TYPE_RX_CPU -#define RX_CPU_TYPE_NAME(model) model RX_CPU_TYPE_SUFFIX +/* + * RXCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A RX CPU model. + */ +struct RXCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; + #define CPU_RESOLVING_TYPE TYPE_RX_CPU const char *rx_crname(uint8_t cr); diff --git a/target/s390x/cpu-qom.h b/target/s390x/cpu-qom.h index 00cae2b131..c59bb1eab1 100644 --- a/target/s390x/cpu-qom.h +++ b/target/s390x/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU S/390 CPU + * QEMU S/390 CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,47 +21,12 @@ #define QEMU_S390_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_S390_CPU "s390x-cpu" OBJECT_DECLARE_CPU_TYPE(S390CPU, S390CPUClass, S390_CPU) -typedef struct S390CPUModel S390CPUModel; -typedef struct S390CPUDef S390CPUDef; - -typedef struct CPUArchState CPUS390XState; - -typedef enum cpu_reset_type { - S390_CPU_RESET_NORMAL, - S390_CPU_RESET_INITIAL, - S390_CPU_RESET_CLEAR, -} cpu_reset_type; - -/** - * S390CPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * @load_normal: Performs a load normal. - * @cpu_reset: Performs a CPU reset. - * @initial_cpu_reset: Performs an initial CPU reset. - * - * An S/390 CPU model. 
- */ -struct S390CPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - const S390CPUDef *cpu_def; - bool kvm_required; - bool is_static; - bool is_migration_safe; - const char *desc; - - DeviceRealize parent_realize; - DeviceReset parent_reset; - void (*load_normal)(CPUState *cpu); - void (*reset)(CPUState *cpu, cpu_reset_type type); -}; +#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU +#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX) #endif diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index 40c5cedd0e..fa3aac4f97 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -29,7 +29,6 @@ #include "cpu_models.h" #include "exec/cpu-defs.h" #include "qemu/cpu-float.h" -#include "tcg/tcg_s390x.h" #include "qapi/qapi-types-machine-common.h" #define ELF_MACHINE_UNAME "S390X" @@ -56,7 +55,7 @@ typedef struct PSW { uint64_t addr; } PSW; -struct CPUArchState { +typedef struct CPUArchState { uint64_t regs[16]; /* GP registers */ /* * The floating point registers are part of the vector registers. @@ -158,7 +157,7 @@ struct CPUArchState { /* currently processed sigp order */ uint8_t sigp_order; -}; +} CPUS390XState; static inline uint64_t *get_freg(CPUS390XState *cs, int nr) { @@ -172,9 +171,7 @@ static inline uint64_t *get_freg(CPUS390XState *cs, int nr) * An S/390 CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUS390XState env; S390CPUModel *model; @@ -183,6 +180,36 @@ struct ArchCPU { uint32_t irqstate_saved_size; }; +typedef enum cpu_reset_type { + S390_CPU_RESET_NORMAL, + S390_CPU_RESET_INITIAL, + S390_CPU_RESET_CLEAR, +} cpu_reset_type; + +/** + * S390CPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * @load_normal: Performs a load normal. + * @cpu_reset: Performs a CPU reset. + * @initial_cpu_reset: Performs an initial CPU reset. + * + * An S/390 CPU model. 
+ */ +struct S390CPUClass { + CPUClass parent_class; + + const S390CPUDef *cpu_def; + bool kvm_required; + bool is_static; + bool is_migration_safe; + const char *desc; + + DeviceRealize parent_realize; + DeviceReset parent_reset; + void (*load_normal)(CPUState *cpu); + void (*reset)(CPUState *cpu, cpu_reset_type type); +}; #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_s390_cpu; @@ -385,6 +412,10 @@ static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch) #endif } +#ifdef CONFIG_TCG + +#include "tcg/tcg_s390x.h" + static inline void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc, uint64_t *cs_base, uint32_t *flags) { @@ -407,6 +438,8 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc, } } +#endif /* CONFIG_TCG */ + /* PER bits from control register 9 */ #define PER_CR9_EVENT_BRANCH 0x80000000 #define PER_CR9_EVENT_IFETCH 0x40000000 @@ -892,8 +925,6 @@ void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, /* helper.c */ -#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU -#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_S390_CPU /* interrupt.c */ diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 540d445023..a63d990e4e 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -753,7 +753,7 @@ void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, const S390CPUDef *def = s390_find_cpu_def(type, gen, ec_ga, NULL); g_assert(def); - g_assert(QTAILQ_EMPTY_RCU(&cpus)); + g_assert(QTAILQ_EMPTY_RCU(&cpus_queue)); /* build the CPU model */ s390_qemu_cpu_model.def = def; diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h index cc7305ec21..d7b8912989 100644 --- a/target/s390x/cpu_models.h +++ b/target/s390x/cpu_models.h @@ -18,7 +18,7 @@ #include "hw/core/cpu.h" /* static CPU definition */ -struct S390CPUDef { +typedef struct S390CPUDef { const char *name; /* name exposed to the user */ const char *desc; /* description exposed to the user */ uint8_t gen; /* hw generation identification */ @@ -38,10 +38,10 @@ struct S390CPUDef { S390FeatBitmap full_feat; /* used to init full_feat from generated data */ S390FeatInit full_init; -}; +} S390CPUDef; /* CPU model based on a CPU definition */ -struct S390CPUModel { +typedef struct S390CPUModel { const S390CPUDef *def; S390FeatBitmap features; /* values copied from the "host" model, can change during migration */ @@ -49,7 +49,7 @@ struct S390CPUModel { uint32_t cpu_id; /* CPU id */ uint8_t cpu_id_format; /* CPU id format bit */ uint8_t cpu_ver; /* CPU version, usually "ff" for kvm */ -}; +} S390CPUModel; /* * CPU ID diff --git a/target/s390x/diag.c b/target/s390x/diag.c index 8ce18e08f3..27ffd48576 100644 --- a/target/s390x/diag.c +++ b/target/s390x/diag.c @@ -77,7 +77,7 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra) { bool valid; CPUState *cs = env_cpu(env); - S390CPU *cpu = S390_CPU(cs); + S390CPU *cpu = env_archcpu(env); uint64_t addr = env->regs[r1]; uint64_t subcode = env->regs[r3]; IplParameterBlock *iplb; diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 0f0e784b2a..33ab3551f4 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -1174,12 +1174,12 @@ static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run, break; case ICPT_PV_INSTR: g_assert(s390_is_pv()); - sclp_service_call_protected(env, sccb, code); + sclp_service_call_protected(cpu, sccb, code); /* Setting the CC is done by the Ultravisor. 
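A conversion applied throughout the series (hvf's x86_emu.c, riscv's internals.h, s390x's diag.c and the helpers around here, ppc's excp_helper.c): code that already holds a CPUArchState pointer derives the CPU objects with env_archcpu()/env_cpu() instead of a QOM dynamic cast such as S390_CPU(env_cpu(env)). The helpers are pure pointer arithmetic, because the arch env is embedded inside the ArchCPU object. A self-contained toy model of the mechanism; the struct names mimic QEMU's but are redefined locally, and real QEMU derives this via container_of():

#include <assert.h>
#include <stddef.h>

typedef struct CPUState { int cpu_index; } CPUState;
typedef struct CPUX86State { unsigned long eip; } CPUX86State;
typedef struct X86CPU { CPUState parent_obj; CPUX86State env; } X86CPU;

/* recover the embedding ArchCPU from a pointer to its env member */
static X86CPU *env_archcpu(CPUX86State *env)
{
    return (X86CPU *)((char *)env - offsetof(X86CPU, env));
}

/* the generic CPUState view is then just the first member */
static CPUState *env_cpu(CPUX86State *env)
{
    return &env_archcpu(env)->parent_obj;
}

int main(void)
{
    X86CPU cpu = { .parent_obj = { .cpu_index = 3 }, .env = { .eip = 0x100 } };
    assert(env_archcpu(&cpu.env) == &cpu);
    assert(env_cpu(&cpu.env)->cpu_index == 3);
    return 0;
}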
*/ break; case ICPT_INSTRUCTION: g_assert(!s390_is_pv()); - r = sclp_service_call(env, sccb, code); + r = sclp_service_call(cpu, sccb, code); if (r < 0) { kvm_s390_program_interrupt(cpu, -r); return; @@ -1358,7 +1358,7 @@ static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run) mode = env->regs[r1] & 0xffff; isc = (env->regs[r3] >> 27) & 0x7; - r = css_do_sic(env, isc, mode); + r = css_do_sic(cpu, isc, mode); if (r) { kvm_s390_program_interrupt(cpu, -r); } diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index e85658ce22..6aa7907438 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -102,7 +102,7 @@ uint64_t HELPER(stck)(CPUS390XState *env) uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) { qemu_mutex_lock_iothread(); - int r = sclp_service_call(env, r1, r2); + int r = sclp_service_call(env_archcpu(env), r1, r2); qemu_mutex_unlock_iothread(); if (r < 0) { tcg_s390_program_interrupt(env, -r, GETPC()); @@ -761,10 +761,11 @@ void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) { + S390CPU *cpu = env_archcpu(env); int r; qemu_mutex_lock_iothread(); - r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff); + r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff); qemu_mutex_unlock_iothread(); /* css_do_sic() may actually return a PGM_xxx value to inject */ if (r) { diff --git a/target/sh4/cpu-qom.h b/target/sh4/cpu-qom.h index 89785a90f0..6cf5fbb074 100644 --- a/target/sh4/cpu-qom.h +++ b/target/sh4/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU SuperH CPU + * QEMU SuperH CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,7 +21,6 @@ #define QEMU_SUPERH_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_SUPERH_CPU "superh-cpu" @@ -31,28 +30,7 @@ OBJECT_DECLARE_CPU_TYPE(SuperHCPU, SuperHCPUClass, SUPERH_CPU) -/** - * SuperHCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * @pvr: Processor Version Register - * @prr: Processor Revision Register - * @cvr: Cache Version Register - * - * A SuperH CPU model. - */ -struct SuperHCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; - - uint32_t pvr; - uint32_t prr; - uint32_t cvr; -}; - +#define SUPERH_CPU_TYPE_SUFFIX "-" TYPE_SUPERH_CPU +#define SUPERH_CPU_TYPE_NAME(model) model SUPERH_CPU_TYPE_SUFFIX #endif diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 788e41fea6..a8ec98b134 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -152,9 +152,6 @@ static ObjectClass *superh_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(SUPERH_CPU_TYPE_NAME("%s"), s); oc = object_class_by_name(typename); - if (oc != NULL && object_class_is_abstract(oc)) { - oc = NULL; - } out: g_free(s); diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index f75a235973..360eac1fbe 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -204,13 +204,31 @@ typedef struct CPUArchState { * A SuperH CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUSH4State env; }; +/** + * SuperHCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * @pvr: Processor Version Register + * @prr: Processor Revision Register + * @cvr: Cache Version Register + * + * A SuperH CPU model. 
+ */ +struct SuperHCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; + + uint32_t pvr; + uint32_t prr; + uint32_t cvr; +}; void superh_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); @@ -252,8 +270,6 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr); void cpu_load_tlb(CPUSH4State * env); -#define SUPERH_CPU_TYPE_SUFFIX "-" TYPE_SUPERH_CPU -#define SUPERH_CPU_TYPE_NAME(model) model SUPERH_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_SUPERH_CPU #define cpu_list sh4_cpu_list diff --git a/target/sparc/cpu-qom.h b/target/sparc/cpu-qom.h index 78bf00b9a2..a86331bd58 100644 --- a/target/sparc/cpu-qom.h +++ b/target/sparc/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU SPARC CPU + * QEMU SPARC CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * @@ -21,7 +21,6 @@ #define QEMU_SPARC_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #ifdef TARGET_SPARC64 #define TYPE_SPARC_CPU "sparc64-cpu" @@ -31,23 +30,7 @@ OBJECT_DECLARE_CPU_TYPE(SPARCCPU, SPARCCPUClass, SPARC_CPU) -typedef struct sparc_def_t sparc_def_t; -/** - * SPARCCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * - * A SPARC CPU model. - */ -struct SPARCCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; - sparc_def_t *cpu_def; -}; - +#define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU +#define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX #endif diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index 3e361a5b75..6999a10a40 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -249,7 +249,7 @@ typedef struct trap_state { #endif #define TARGET_INSN_START_EXTRA_WORDS 1 -struct sparc_def_t { +typedef struct sparc_def_t { const char *name; target_ulong iu_version; uint32_t fpu_version; @@ -263,7 +263,7 @@ struct sparc_def_t { uint32_t features; uint32_t nwindows; uint32_t maxtl; -}; +} sparc_def_t; #define FEATURE(X) CPU_FEATURE_BIT_##X, enum { @@ -562,13 +562,25 @@ struct CPUArchState { * A SPARC CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUSPARCState env; }; +/** + * SPARCCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * + * A SPARC CPU model. 
+ */ +struct SPARCCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; + sparc_def_t *cpu_def; +}; #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_sparc_cpu; @@ -656,8 +668,6 @@ hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, #endif #endif -#define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU -#define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_SPARC_CPU #define cpu_list sparc_cpu_list diff --git a/target/tricore/cpu-qom.h b/target/tricore/cpu-qom.h index 612731daa0..e35dc1ad2d 100644 --- a/target/tricore/cpu-qom.h +++ b/target/tricore/cpu-qom.h @@ -1,4 +1,6 @@ /* + * QEMU TriCore CPU QOM header (target agnostic) + * * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn * * This library is free software; you can redistribute it and/or @@ -19,21 +21,12 @@ #define QEMU_TRICORE_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" - #define TYPE_TRICORE_CPU "tricore-cpu" OBJECT_DECLARE_CPU_TYPE(TriCoreCPU, TriCoreCPUClass, TRICORE_CPU) -struct TriCoreCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; -}; - +#define TRICORE_CPU_TYPE_SUFFIX "-" TYPE_TRICORE_CPU +#define TRICORE_CPU_TYPE_NAME(model) model TRICORE_CPU_TYPE_SUFFIX #endif /* QEMU_TRICORE_CPU_QOM_H */ diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index 5ca666ee12..034e01c189 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -132,8 +132,7 @@ static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(TRICORE_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU) || - object_class_is_abstract(oc)) { + if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU)) { return NULL; } return oc; diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h index a357b573f2..de3ab53a83 100644 --- a/target/tricore/cpu.h +++ b/target/tricore/cpu.h @@ -63,13 +63,17 @@ typedef struct CPUArchState { * A TriCore CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUTriCoreState env; }; +struct TriCoreCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void tricore_cpu_dump_state(CPUState *cpu, FILE *f, int flags); @@ -270,8 +274,6 @@ static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc, *flags = new_flags; } -#define TRICORE_CPU_TYPE_SUFFIX "-" TYPE_TRICORE_CPU -#define TRICORE_CPU_TYPE_NAME(model) model TRICORE_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU /* helpers.c */ diff --git a/target/xtensa/cpu-qom.h b/target/xtensa/cpu-qom.h index 419c7d8e4a..d932346b5f 100644 --- a/target/xtensa/cpu-qom.h +++ b/target/xtensa/cpu-qom.h @@ -1,5 +1,5 @@ /* - * QEMU Xtensa CPU + * QEMU Xtensa CPU QOM header (target agnostic) * * Copyright (c) 2012 SUSE LINUX Products GmbH * All rights reserved. @@ -30,32 +30,12 @@ #define QEMU_XTENSA_CPU_QOM_H #include "hw/core/cpu.h" -#include "qom/object.h" #define TYPE_XTENSA_CPU "xtensa-cpu" OBJECT_DECLARE_CPU_TYPE(XtensaCPU, XtensaCPUClass, XTENSA_CPU) -typedef struct XtensaConfig XtensaConfig; - -/** - * XtensaCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. 
- * @config: The CPU core configuration. - * - * An Xtensa CPU model. - */ -struct XtensaCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - ResettablePhases parent_phases; - - const XtensaConfig *config; -}; - +#define XTENSA_CPU_TYPE_SUFFIX "-" TYPE_XTENSA_CPU +#define XTENSA_CPU_TYPE_NAME(model) model XTENSA_CPU_TYPE_SUFFIX #endif diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index ea1dae7390..e20fe87bf2 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -141,8 +141,7 @@ static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc == NULL || !object_class_dynamic_cast(oc, TYPE_XTENSA_CPU) || - object_class_is_abstract(oc)) { + if (oc == NULL || !object_class_dynamic_cast(oc, TYPE_XTENSA_CPU)) { return NULL; } return oc; diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index c6bbef1e5d..dd81729306 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -426,7 +426,7 @@ extern const XtensaOpcodeTranslators xtensa_core_opcodes; extern const XtensaOpcodeTranslators xtensa_fpu2000_opcodes; extern const XtensaOpcodeTranslators xtensa_fpu_opcodes; -struct XtensaConfig { +typedef struct XtensaConfig { const char *name; uint64_t options; XtensaGdbRegmap gdb_regmap; @@ -489,7 +489,7 @@ struct XtensaConfig { const xtensa_mpu_entry *mpu_bg; bool use_first_nan; -}; +} XtensaConfig; typedef struct XtensaConfigList { const XtensaConfig *config; @@ -556,14 +556,28 @@ struct CPUArchState { * An Xtensa CPU. */ struct ArchCPU { - /*< private >*/ CPUState parent_obj; - /*< public >*/ CPUXtensaState env; Clock *clock; }; +/** + * XtensaCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_phases: The parent class' reset phase handlers. + * @config: The CPU core configuration. + * + * An Xtensa CPU model. 
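 *
 * The class_by_name() hooks above (sh4, tricore, xtensa) also drop their
 * object_class_is_abstract() checks; the pattern suggests the common CPU
 * class lookup, e.g.
 *
 *     ObjectClass *oc = cpu_class_by_name(TYPE_XTENSA_CPU, cpu_model);
 *
 * is now expected to filter abstract classes once for all targets (an
 * inference from the uniform removal here, not shown in these hunks).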
+ */ +struct XtensaCPUClass { + CPUClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; + + const XtensaConfig *config; +}; #ifndef CONFIG_USER_ONLY bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, @@ -588,8 +602,6 @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, #define cpu_list xtensa_cpu_list -#define XTENSA_CPU_TYPE_SUFFIX "-" TYPE_XTENSA_CPU -#define XTENSA_CPU_TYPE_NAME(model) model XTENSA_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_XTENSA_CPU #if TARGET_BIG_ENDIAN diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c index 7bb8cd6726..496754ba57 100644 --- a/target/xtensa/op_helper.c +++ b/target/xtensa/op_helper.c @@ -37,7 +37,7 @@ void HELPER(update_ccount)(CPUXtensaState *env) { - XtensaCPU *cpu = XTENSA_CPU(env_cpu(env)); + XtensaCPU *cpu = env_archcpu(env); uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); env->ccount_time = now; @@ -58,7 +58,7 @@ void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v) void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i) { - XtensaCPU *cpu = XTENSA_CPU(env_cpu(env)); + XtensaCPU *cpu = env_archcpu(env); uint64_t dcc; qatomic_and(&env->sregs[INTSET], diff --git a/tests/avocado/acpi-bits.py b/tests/avocado/acpi-bits.py index eca13dc518..68b9e98d4e 100644 --- a/tests/avocado/acpi-bits.py +++ b/tests/avocado/acpi-bits.py @@ -18,7 +18,7 @@ # # # Author: -# Ani Sinha <ani@anisinha.ca> +# Ani Sinha <anisinha@redhat.com> # pylint: disable=invalid-name # pylint: disable=consider-using-f-string @@ -48,6 +48,7 @@ from typing import ( ) from qemu.machine import QEMUMachine from avocado import skipIf +from avocado.utils import datadrainer as drainer from avocado_qemu import QemuBaseTest deps = ["xorriso", "mformat"] # dependent tools needed in the test setup/box. @@ -141,12 +142,12 @@ class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes self._baseDir = None # following are some standard configuration constants - self._bitsInternalVer = 2020 - self._bitsCommitHash = 'b48b88ff' # commit hash must match + self._bitsInternalVer = 2020 # gitlab CI does shallow clones of depth 20 + self._bitsCommitHash = 'c7920d2b' # commit hash must match # the artifact tag below - self._bitsTag = "qemu-bits-10182022" # this is the latest bits + self._bitsTag = "qemu-bits-10262023" # this is the latest bits # release as of today. - self._bitsArtSHA1Hash = 'b04790ac9b99b5662d0416392c73b97580641fe5' + self._bitsArtSHA1Hash = 'b22cdfcfc7453875297d06d626f5474ee36a343f' self._bitsArtURL = ("https://gitlab.com/qemu-project/" "biosbits-bits/-/jobs/artifacts/%s/" "download?job=qemu-bits-build" %self._bitsTag) @@ -380,16 +381,26 @@ class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes # consistent in terms of timing. smilatency tests have consistent # timing requirements. self._vm.add_args('-icount', 'auto') + # currently there is no support in bits for recognizing 64-bit SMBIOS + # entry points. QEMU defaults to 64-bit entry points since the + # upstream commit bf376f3020 ("hw/i386/pc: Default to use SMBIOS 3.0 + # for newer machine models"). Therefore, enforce 32-bit entry point. 
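+        # (A 32-bit entry point is the SMBIOS 2.1-style "_SM_" anchor that
+        # bits can locate; the 64-bit default uses the SMBIOS 3.0 "_SM3_"
+        # anchor it cannot.)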
+ self._vm.add_args('-machine', 'smbios-entry-point-type=32') + + # enable console logging + self._vm.set_console() + self._vm.launch() - args = " ".join(str(arg) for arg in self._vm.base_args()) + \ - " " + " ".join(str(arg) for arg in self._vm.args) - - self.logger.info("launching QEMU vm with the following arguments: %s", - args) + self.logger.debug("Console output from bits VM follows ...") + c_drainer = drainer.LineLogger(self._vm.console_socket.fileno(), + logger=self.logger.getChild("console"), + stop_check=(lambda : + not self._vm.is_running())) + c_drainer.start() - self._vm.launch() # biosbits has been configured to run all the specified test suites # in batch mode and then automatically initiate a vm shutdown. # Rely on avocado's unit test timeout. + self._vm.event_wait('SHUTDOWN') self._vm.wait(timeout=None) self.parse_log() diff --git a/tests/data/acpi/q35/APIC.core-count b/tests/data/acpi/q35/APIC.core-count Binary files differnew file mode 100644 index 0000000000..d9d7ca9a89 --- /dev/null +++ b/tests/data/acpi/q35/APIC.core-count diff --git a/tests/data/acpi/q35/APIC.core-count2 b/tests/data/acpi/q35/APIC.core-count2 Binary files differindex f5da2eb1e8..4f24284434 100644 --- a/tests/data/acpi/q35/APIC.core-count2 +++ b/tests/data/acpi/q35/APIC.core-count2 diff --git a/tests/data/acpi/q35/APIC.thread-count b/tests/data/acpi/q35/APIC.thread-count Binary files differnew file mode 100644 index 0000000000..c27e87fcf1 --- /dev/null +++ b/tests/data/acpi/q35/APIC.thread-count diff --git a/tests/data/acpi/q35/APIC.thread-count2 b/tests/data/acpi/q35/APIC.thread-count2 Binary files differnew file mode 100644 index 0000000000..ac200ab7aa --- /dev/null +++ b/tests/data/acpi/q35/APIC.thread-count2 diff --git a/tests/data/acpi/q35/APIC.type4-count b/tests/data/acpi/q35/APIC.type4-count Binary files differnew file mode 100644 index 0000000000..ab60a6ef06 --- /dev/null +++ b/tests/data/acpi/q35/APIC.type4-count diff --git a/tests/data/acpi/q35/DSDT.core-count b/tests/data/acpi/q35/DSDT.core-count Binary files differnew file mode 100644 index 0000000000..a24b04cbdb --- /dev/null +++ b/tests/data/acpi/q35/DSDT.core-count diff --git a/tests/data/acpi/q35/DSDT.core-count2 b/tests/data/acpi/q35/DSDT.core-count2 Binary files differindex b47891ec10..3a0cb8c581 100644 --- a/tests/data/acpi/q35/DSDT.core-count2 +++ b/tests/data/acpi/q35/DSDT.core-count2 diff --git a/tests/data/acpi/q35/DSDT.thread-count b/tests/data/acpi/q35/DSDT.thread-count Binary files differnew file mode 100644 index 0000000000..a24b04cbdb --- /dev/null +++ b/tests/data/acpi/q35/DSDT.thread-count diff --git a/tests/data/acpi/q35/DSDT.thread-count2 b/tests/data/acpi/q35/DSDT.thread-count2 Binary files differnew file mode 100644 index 0000000000..3a0cb8c581 --- /dev/null +++ b/tests/data/acpi/q35/DSDT.thread-count2 diff --git a/tests/data/acpi/q35/DSDT.type4-count b/tests/data/acpi/q35/DSDT.type4-count Binary files differnew file mode 100644 index 0000000000..edc23198cd --- /dev/null +++ b/tests/data/acpi/q35/DSDT.type4-count diff --git a/tests/data/acpi/q35/FACP.core-count b/tests/data/acpi/q35/FACP.core-count Binary files differnew file mode 100644 index 0000000000..31fa5dd19c --- /dev/null +++ b/tests/data/acpi/q35/FACP.core-count diff --git a/tests/data/acpi/q35/FACP.thread-count b/tests/data/acpi/q35/FACP.thread-count Binary files differnew file mode 100644 index 0000000000..31fa5dd19c --- /dev/null +++ b/tests/data/acpi/q35/FACP.thread-count diff --git a/tests/data/acpi/q35/FACP.thread-count2 
b/tests/data/acpi/q35/FACP.thread-count2 Binary files differnew file mode 100644 index 0000000000..31fa5dd19c --- /dev/null +++ b/tests/data/acpi/q35/FACP.thread-count2 diff --git a/tests/data/acpi/q35/FACP.type4-count b/tests/data/acpi/q35/FACP.type4-count Binary files differnew file mode 100644 index 0000000000..31fa5dd19c --- /dev/null +++ b/tests/data/acpi/q35/FACP.type4-count diff --git a/tests/qtest/adm1266-test.c b/tests/qtest/adm1266-test.c new file mode 100644 index 0000000000..6c312c499f --- /dev/null +++ b/tests/qtest/adm1266-test.c @@ -0,0 +1,122 @@ +/* + * Analog Devices ADM1266 Cascadable Super Sequencer with Margin Control and + * Fault Recording with PMBus + * + * Copyright 2022 Google LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include <math.h> +#include "hw/i2c/pmbus_device.h" +#include "libqtest-single.h" +#include "libqos/qgraph.h" +#include "libqos/i2c.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qnum.h" +#include "qemu/bitops.h" + +#define TEST_ID "adm1266-test" +#define TEST_ADDR (0x12) + +#define ADM1266_BLACKBOX_CONFIG 0xD3 +#define ADM1266_PDIO_CONFIG 0xD4 +#define ADM1266_READ_STATE 0xD9 +#define ADM1266_READ_BLACKBOX 0xDE +#define ADM1266_SET_RTC 0xDF +#define ADM1266_GPIO_SYNC_CONFIGURATION 0xE1 +#define ADM1266_BLACKBOX_INFORMATION 0xE6 +#define ADM1266_PDIO_STATUS 0xE9 +#define ADM1266_GPIO_STATUS 0xEA + +/* Defaults */ +#define ADM1266_OPERATION_DEFAULT 0x80 +#define ADM1266_CAPABILITY_DEFAULT 0xA0 +#define ADM1266_CAPABILITY_NO_PEC 0x20 +#define ADM1266_PMBUS_REVISION_DEFAULT 0x22 +#define ADM1266_MFR_ID_DEFAULT "ADI" +#define ADM1266_MFR_ID_DEFAULT_LEN 32 +#define ADM1266_MFR_MODEL_DEFAULT "ADM1266-A1" +#define ADM1266_MFR_MODEL_DEFAULT_LEN 32 +#define ADM1266_MFR_REVISION_DEFAULT "25" +#define ADM1266_MFR_REVISION_DEFAULT_LEN 8 +#define TEST_STRING_A "a sample" +#define TEST_STRING_B "b sample" +#define TEST_STRING_C "rev c" + +static void compare_string(QI2CDevice *i2cdev, uint8_t reg, + const char *test_str) +{ + uint8_t len = i2c_get8(i2cdev, reg); + char i2c_str[SMBUS_DATA_MAX_LEN] = {0}; + + i2c_read_block(i2cdev, reg, (uint8_t *)i2c_str, len); + g_assert_cmpstr(i2c_str, ==, test_str); +} + +static void write_and_compare_string(QI2CDevice *i2cdev, uint8_t reg, + const char *test_str, uint8_t len) +{ + char buf[SMBUS_DATA_MAX_LEN] = {0}; + buf[0] = len; + strncpy(buf + 1, test_str, len); + i2c_write_block(i2cdev, reg, (uint8_t *)buf, len + 1); + compare_string(i2cdev, reg, test_str); +} + +static void test_defaults(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t i2c_value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + i2c_value = i2c_get8(i2cdev, PMBUS_OPERATION); + g_assert_cmphex(i2c_value, ==, ADM1266_OPERATION_DEFAULT); + + i2c_value = i2c_get8(i2cdev, PMBUS_REVISION); + g_assert_cmphex(i2c_value, ==, ADM1266_PMBUS_REVISION_DEFAULT); + + compare_string(i2cdev, PMBUS_MFR_ID, ADM1266_MFR_ID_DEFAULT); + compare_string(i2cdev, PMBUS_MFR_MODEL, ADM1266_MFR_MODEL_DEFAULT); + compare_string(i2cdev, PMBUS_MFR_REVISION, ADM1266_MFR_REVISION_DEFAULT); +} + +/* test r/w registers */ +static void test_rw_regs(void *obj, void *data, QGuestAllocator *alloc) +{ + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + /* empty strings */ + i2c_set8(i2cdev, PMBUS_MFR_ID, 0); + compare_string(i2cdev, PMBUS_MFR_ID, ""); + + i2c_set8(i2cdev, PMBUS_MFR_MODEL, 0); + compare_string(i2cdev, PMBUS_MFR_MODEL, ""); + + i2c_set8(i2cdev, PMBUS_MFR_REVISION, 0); + compare_string(i2cdev, PMBUS_MFR_REVISION, ""); + + /* test 
strings */ + write_and_compare_string(i2cdev, PMBUS_MFR_ID, TEST_STRING_A, + sizeof(TEST_STRING_A)); + write_and_compare_string(i2cdev, PMBUS_MFR_ID, TEST_STRING_B, + sizeof(TEST_STRING_B)); + write_and_compare_string(i2cdev, PMBUS_MFR_ID, TEST_STRING_C, + sizeof(TEST_STRING_C)); +} + +static void adm1266_register_nodes(void) +{ + QOSGraphEdgeOptions opts = { + .extra_device_opts = "id=" TEST_ID ",address=0x12" + }; + add_qi2c_address(&opts, &(QI2CAddress) { TEST_ADDR }); + + qos_node_create_driver("adm1266", i2c_device_create); + qos_node_consumes("adm1266", "i2c-bus", &opts); + + qos_add_test("test_defaults", "adm1266", test_defaults, NULL); + qos_add_test("test_rw_regs", "adm1266", test_rw_regs, NULL); +} + +libqos_init(adm1266_register_nodes); diff --git a/tests/qtest/ahci-test.c b/tests/qtest/ahci-test.c index eea8b5f77b..5a1923f721 100644 --- a/tests/qtest/ahci-test.c +++ b/tests/qtest/ahci-test.c @@ -1424,6 +1424,89 @@ static void test_reset(void) ahci_shutdown(ahci); } +static void test_reset_pending_callback(void) +{ + AHCIQState *ahci; + AHCICommand *cmd; + uint8_t port; + uint64_t ptr1; + uint64_t ptr2; + + int bufsize = 4 * 1024; + int speed = bufsize + (bufsize / 2); + int offset1 = 0; + int offset2 = bufsize / AHCI_SECTOR_SIZE; + + g_autofree unsigned char *tx1 = g_malloc(bufsize); + g_autofree unsigned char *tx2 = g_malloc(bufsize); + g_autofree unsigned char *rx1 = g_malloc0(bufsize); + g_autofree unsigned char *rx2 = g_malloc0(bufsize); + + /* Uses throttling to make test independent of specific environment. */ + ahci = ahci_boot_and_enable("-drive if=none,id=drive0,file=%s," + "cache=writeback,format=%s," + "throttling.bps-write=%d " + "-M q35 " + "-device ide-hd,drive=drive0 ", + tmp_path, imgfmt, speed); + + port = ahci_port_select(ahci); + ahci_port_clear(ahci, port); + + ptr1 = ahci_alloc(ahci, bufsize); + ptr2 = ahci_alloc(ahci, bufsize); + + g_assert(ptr1 && ptr2); + + /* Need two different patterns. */ + do { + generate_pattern(tx1, bufsize, AHCI_SECTOR_SIZE); + generate_pattern(tx2, bufsize, AHCI_SECTOR_SIZE); + } while (memcmp(tx1, tx2, bufsize) == 0); + + qtest_bufwrite(ahci->parent->qts, ptr1, tx1, bufsize); + qtest_bufwrite(ahci->parent->qts, ptr2, tx2, bufsize); + + /* Write to beginning of disk to check it wasn't overwritten later. */ + ahci_guest_io(ahci, port, CMD_WRITE_DMA_EXT, ptr1, bufsize, offset1); + + /* Issue asynchronously to get a pending callback during reset. */ + cmd = ahci_command_create(CMD_WRITE_DMA_EXT); + ahci_command_adjust(cmd, offset2, ptr2, bufsize, 0); + ahci_command_commit(ahci, cmd, port); + ahci_command_issue_async(ahci, cmd); + + ahci_set(ahci, AHCI_GHC, AHCI_GHC_HR); + + ahci_command_free(cmd); + + /* Wait for throttled write to finish. */ + sleep(1); + + /* Start again. */ + ahci_clean_mem(ahci); + ahci_pci_enable(ahci); + ahci_hba_enable(ahci); + port = ahci_port_select(ahci); + ahci_port_clear(ahci, port); + + /* Read and verify. 
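 * Both patterns must survive: the first write completed before the reset,
 * while the throttled second write still had its completion callback
 * pending across the controller reset, which is the regression case this
 * test exercises.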
*/ + ahci_guest_io(ahci, port, CMD_READ_DMA_EXT, ptr1, bufsize, offset1); + qtest_bufread(ahci->parent->qts, ptr1, rx1, bufsize); + g_assert_cmphex(memcmp(tx1, rx1, bufsize), ==, 0); + + ahci_guest_io(ahci, port, CMD_READ_DMA_EXT, ptr2, bufsize, offset2); + qtest_bufread(ahci->parent->qts, ptr2, rx2, bufsize); + g_assert_cmphex(memcmp(tx2, rx2, bufsize), ==, 0); + + ahci_free(ahci, ptr1); + ahci_free(ahci, ptr2); + + ahci_clean_mem(ahci); + + ahci_shutdown(ahci); +} + static void test_ncq_simple(void) { AHCIQState *ahci; @@ -1945,7 +2028,8 @@ int main(int argc, char **argv) qtest_add_func("/ahci/migrate/dma/halted", test_migrate_halted_dma); qtest_add_func("/ahci/max", test_max); - qtest_add_func("/ahci/reset", test_reset); + qtest_add_func("/ahci/reset/simple", test_reset); + qtest_add_func("/ahci/reset/pending_callback", test_reset_pending_callback); qtest_add_func("/ahci/io/ncq/simple", test_ncq_simple); qtest_add_func("/ahci/migrate/ncq/simple", test_migrate_ncq); diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index 9f4bc15aab..71af5cf69f 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -95,8 +95,11 @@ typedef struct { uint16_t smbios_cpu_curr_speed; uint8_t smbios_core_count; uint16_t smbios_core_count2; + uint8_t smbios_thread_count; + uint16_t smbios_thread_count2; uint8_t *required_struct_types; int required_struct_types_len; + int type4_count; QTestState *qts; } test_data; @@ -639,8 +642,10 @@ static void smbios_cpu_test(test_data *data, uint32_t addr, SmbiosEntryPointType ep_type) { uint8_t core_count, expected_core_count = data->smbios_core_count; + uint8_t thread_count, expected_thread_count = data->smbios_thread_count; uint16_t speed, expected_speed[2]; uint16_t core_count2, expected_core_count2 = data->smbios_core_count2; + uint16_t thread_count2, expected_thread_count2 = data->smbios_thread_count2; int offset[2]; int i; @@ -662,6 +667,13 @@ static void smbios_cpu_test(test_data *data, uint32_t addr, g_assert_cmpuint(core_count, ==, expected_core_count); } + thread_count = qtest_readb(data->qts, + addr + offsetof(struct smbios_type_4, thread_count)); + + if (expected_thread_count) { + g_assert_cmpuint(thread_count, ==, expected_thread_count); + } + if (ep_type == SMBIOS_ENTRY_POINT_TYPE_64) { core_count2 = qtest_readw(data->qts, addr + offsetof(struct smbios_type_4, core_count2)); @@ -670,6 +682,24 @@ static void smbios_cpu_test(test_data *data, uint32_t addr, if (expected_core_count == 0xFF && expected_core_count2) { g_assert_cmpuint(core_count2, ==, expected_core_count2); } + + thread_count2 = qtest_readw(data->qts, + addr + offsetof(struct smbios_type_4, + thread_count2)); + + /* Thread Count has reached its limit, checking Thread Count 2 */ + if (expected_thread_count == 0xFF && expected_thread_count2) { + g_assert_cmpuint(thread_count2, ==, expected_thread_count2); + } + } +} + +static void smbios_type4_count_test(test_data *data, int type4_count) +{ + int expected_type4_count = data->type4_count; + + if (expected_type4_count) { + g_assert_cmpuint(type4_count, ==, expected_type4_count); } } @@ -678,7 +708,7 @@ static void test_smbios_structs(test_data *data, SmbiosEntryPointType ep_type) DECLARE_BITMAP(struct_bitmap, SMBIOS_MAX_TYPE+1) = { 0 }; SmbiosEntryPoint *ep_table = &data->smbios_ep_table; - int i = 0, len, max_len = 0; + int i = 0, len, max_len = 0, type4_count = 0; uint8_t type, prv, crt; uint64_t addr; @@ -704,6 +734,7 @@ static void test_smbios_structs(test_data *data, SmbiosEntryPointType 
ep_type) if (type == 4) { smbios_cpu_test(data, addr, ep_type); + type4_count++; } /* seek to end of unformatted string area of this struct ("\0\0") */ @@ -747,6 +778,8 @@ static void test_smbios_structs(test_data *data, SmbiosEntryPointType ep_type) for (i = 0; i < data->required_struct_types_len; i++) { g_assert(test_bit(data->required_struct_types[i], struct_bitmap)); } + + smbios_type4_count_test(data, type4_count); } static void test_acpi_load_tables(test_data *data) @@ -970,6 +1003,39 @@ static void test_acpi_q35_tcg(void) free_test_data(&data); } +static void test_acpi_q35_tcg_type4_count(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".type4-count", + .required_struct_types = base_required_struct_types, + .required_struct_types_len = ARRAY_SIZE(base_required_struct_types), + .type4_count = 5, + }; + + test_acpi_one("-machine smbios-entry-point-type=64 " + "-smp cpus=100,maxcpus=120,sockets=5," + "dies=2,cores=4,threads=3", &data); + free_test_data(&data); +} + +static void test_acpi_q35_tcg_core_count(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".core-count", + .required_struct_types = base_required_struct_types, + .required_struct_types_len = ARRAY_SIZE(base_required_struct_types), + .smbios_core_count = 9, + .smbios_core_count2 = 9, + }; + + test_acpi_one("-machine smbios-entry-point-type=64 " + "-smp 54,sockets=2,dies=3,cores=3,threads=3", + &data); + free_test_data(&data); +} + static void test_acpi_q35_tcg_core_count2(void) { test_data data = { @@ -978,10 +1044,46 @@ static void test_acpi_q35_tcg_core_count2(void) .required_struct_types = base_required_struct_types, .required_struct_types_len = ARRAY_SIZE(base_required_struct_types), .smbios_core_count = 0xFF, - .smbios_core_count2 = 275, + .smbios_core_count2 = 260, + }; + + test_acpi_one("-machine smbios-entry-point-type=64 " + "-smp 260,dies=2,cores=130,threads=1", + &data); + free_test_data(&data); +} + +static void test_acpi_q35_tcg_thread_count(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".thread-count", + .required_struct_types = base_required_struct_types, + .required_struct_types_len = ARRAY_SIZE(base_required_struct_types), + .smbios_thread_count = 27, + .smbios_thread_count2 = 27, + }; + + test_acpi_one("-machine smbios-entry-point-type=64 " + "-smp cpus=15,maxcpus=54,sockets=2,dies=3,cores=3,threads=3", + &data); + free_test_data(&data); +} + +static void test_acpi_q35_tcg_thread_count2(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".thread-count2", + .required_struct_types = base_required_struct_types, + .required_struct_types_len = ARRAY_SIZE(base_required_struct_types), + .smbios_thread_count = 0xFF, + .smbios_thread_count2 = 260, }; - test_acpi_one("-machine smbios-entry-point-type=64 -smp 275", &data); + test_acpi_one("-machine smbios-entry-point-type=64 " + "-smp cpus=210,maxcpus=260,dies=2,cores=65,threads=2", + &data); free_test_data(&data); } @@ -2147,8 +2249,16 @@ int main(int argc, char *argv[]) if (has_kvm) { qtest_add_func("acpi/q35/kvm/xapic", test_acpi_q35_kvm_xapic); qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar); + qtest_add_func("acpi/q35/type4-count", + test_acpi_q35_tcg_type4_count); + qtest_add_func("acpi/q35/core-count", + test_acpi_q35_tcg_core_count); qtest_add_func("acpi/q35/core-count2", test_acpi_q35_tcg_core_count2); + qtest_add_func("acpi/q35/thread-count", + test_acpi_q35_tcg_thread_count); + qtest_add_func("acpi/q35/thread-count2", + test_acpi_q35_tcg_thread_count2); } if 
(qtest_has_device("virtio-iommu-pci")) { qtest_add_func("acpi/q35/viot", test_acpi_q35_viot); diff --git a/tests/qtest/max34451-test.c b/tests/qtest/max34451-test.c index 0c98d0764c..dbf6ddc829 100644 --- a/tests/qtest/max34451-test.c +++ b/tests/qtest/max34451-test.c @@ -18,6 +18,7 @@ #define TEST_ID "max34451-test" #define TEST_ADDR (0x4e) +#define MAX34451_MFR_MODE 0xD1 #define MAX34451_MFR_VOUT_PEAK 0xD4 #define MAX34451_MFR_IOUT_PEAK 0xD5 #define MAX34451_MFR_TEMPERATURE_PEAK 0xD6 @@ -315,6 +316,28 @@ static void test_ot_faults(void *obj, void *data, QGuestAllocator *alloc) } } +#define RAND_ON_OFF_CONFIG 0x12 +#define RAND_MFR_MODE 0x3456 + +/* test writes to all pages */ +static void test_all_pages(void *obj, void *data, QGuestAllocator *alloc) +{ + uint16_t i2c_value; + QI2CDevice *i2cdev = (QI2CDevice *)obj; + + i2c_set8(i2cdev, PMBUS_PAGE, PB_ALL_PAGES); + i2c_set8(i2cdev, PMBUS_ON_OFF_CONFIG, RAND_ON_OFF_CONFIG); + max34451_i2c_set16(i2cdev, MAX34451_MFR_MODE, RAND_MFR_MODE); + + for (int i = 0; i < MAX34451_NUM_TEMP_DEVICES + MAX34451_NUM_PWR_DEVICES; + i++) { + i2c_value = i2c_get8(i2cdev, PMBUS_ON_OFF_CONFIG); + g_assert_cmphex(i2c_value, ==, RAND_ON_OFF_CONFIG); + i2c_value = max34451_i2c_get16(i2cdev, MAX34451_MFR_MODE); + g_assert_cmphex(i2c_value, ==, RAND_MFR_MODE); + } +} + static void max34451_register_nodes(void) { QOSGraphEdgeOptions opts = { @@ -332,5 +355,6 @@ static void max34451_register_nodes(void) qos_add_test("test_ro_regs", "max34451", test_ro_regs, NULL); qos_add_test("test_ov_faults", "max34451", test_ov_faults, NULL); qos_add_test("test_ot_faults", "max34451", test_ot_faults, NULL); + qos_add_test("test_all_pages", "max34451", test_all_pages, NULL); } libqos_init(max34451_register_nodes); diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index c9945e69b1..47dabf91d0 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -241,6 +241,7 @@ qos_test_ss = ss.source_set() qos_test_ss.add( 'ac97-test.c', 'adm1272-test.c', + 'adm1266-test.c', 'ds1338-test.c', 'e1000-test.c', 'eepro100-test.c', diff --git a/tests/unit/meson.build b/tests/unit/meson.build index e6c51e7a86..a05d471090 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -22,8 +22,8 @@ tests = { 'test-visitor-serialization': [testqapi], 'test-bitmap': [], 'test-resv-mem': [], - # all code tested by test-x86-cpuid is inside topology.h - 'test-x86-cpuid': [], + # all code tested by test-x86-topo is inside topology.h + 'test-x86-topo': [], 'test-cutils': [], 'test-div128': [], 'test-shift128': [], diff --git a/tests/unit/test-seccomp.c b/tests/unit/test-seccomp.c index f02c79cafd..bab93fd6da 100644 --- a/tests/unit/test-seccomp.c +++ b/tests/unit/test-seccomp.c @@ -229,26 +229,26 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); if (can_play_with_seccomp()) { #ifdef SYS_fork - g_test_add_func("/softmmu/seccomp/sys-fork/on", + g_test_add_func("/seccomp/sys-fork/on", test_seccomp_sys_fork_on); - g_test_add_func("/softmmu/seccomp/sys-fork/on-nospawn", + g_test_add_func("/seccomp/sys-fork/on-nospawn", test_seccomp_sys_fork_on_nospawn); - g_test_add_func("/softmmu/seccomp/sys-fork/off", + g_test_add_func("/seccomp/sys-fork/off", test_seccomp_sys_fork_off); #endif - g_test_add_func("/softmmu/seccomp/fork/on", + g_test_add_func("/seccomp/fork/on", test_seccomp_fork_on); - g_test_add_func("/softmmu/seccomp/fork/on-nospawn", + g_test_add_func("/seccomp/fork/on-nospawn", test_seccomp_fork_on_nospawn); - g_test_add_func("/softmmu/seccomp/fork/off", + 
g_test_add_func("/seccomp/fork/off", test_seccomp_fork_off); - g_test_add_func("/softmmu/seccomp/thread/on", + g_test_add_func("/seccomp/thread/on", test_seccomp_thread_on); - g_test_add_func("/softmmu/seccomp/thread/on-nospawn", + g_test_add_func("/seccomp/thread/on-nospawn", test_seccomp_thread_on_nospawn); - g_test_add_func("/softmmu/seccomp/thread/off", + g_test_add_func("/seccomp/thread/off", test_seccomp_thread_off); if (doit_sched() == 0) { @@ -256,11 +256,11 @@ int main(int argc, char **argv) * musl doesn't impl sched_setscheduler, hence * we check above if it works first */ - g_test_add_func("/softmmu/seccomp/sched/on", + g_test_add_func("/seccomp/sched/on", test_seccomp_sched_on); - g_test_add_func("/softmmu/seccomp/sched/on-nores", + g_test_add_func("/seccomp/sched/on-nores", test_seccomp_sched_on_nores); - g_test_add_func("/softmmu/seccomp/sched/off", + g_test_add_func("/seccomp/sched/off", test_seccomp_sched_off); } } diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c index fdc39a846c..24972666a7 100644 --- a/tests/unit/test-smp-parse.c +++ b/tests/unit/test-smp-parse.c @@ -394,20 +394,47 @@ static char *smp_config_to_string(const SMPConfiguration *config) config->has_maxcpus ? "true" : "false", config->maxcpus); } -static char *cpu_topology_to_string(const CpuTopology *topo) +/* Use the different calculation than machine_topo_get_threads_per_socket(). */ +static unsigned int cpu_topology_get_threads_per_socket(const CpuTopology *topo) +{ + /* Check the divisor to avoid invalid topology examples causing SIGFPE. */ + if (!topo->sockets) { + return 0; + } else { + return topo->max_cpus / topo->sockets; + } +} + +/* Use the different calculation than machine_topo_get_cores_per_socket(). */ +static unsigned int cpu_topology_get_cores_per_socket(const CpuTopology *topo) +{ + /* Check the divisor to avoid invalid topology examples causing SIGFPE. 
*/ + if (!topo->threads) { + return 0; + } else { + return cpu_topology_get_threads_per_socket(topo) / topo->threads; + } +} + +static char *cpu_topology_to_string(const CpuTopology *topo, + unsigned int threads_per_socket, + unsigned int cores_per_socket) { return g_strdup_printf( "(CpuTopology) {\n" - " .cpus = %u,\n" - " .sockets = %u,\n" - " .dies = %u,\n" - " .clusters = %u,\n" - " .cores = %u,\n" - " .threads = %u,\n" - " .max_cpus = %u,\n" + " .cpus = %u,\n" + " .sockets = %u,\n" + " .dies = %u,\n" + " .clusters = %u,\n" + " .cores = %u,\n" + " .threads = %u,\n" + " .max_cpus = %u,\n" + " .threads_per_socket = %u,\n" + " .cores_per_socket = %u,\n" "}", topo->cpus, topo->sockets, topo->dies, topo->clusters, - topo->cores, topo->threads, topo->max_cpus); + topo->cores, topo->threads, topo->max_cpus, + threads_per_socket, cores_per_socket); } static void check_parse(MachineState *ms, const SMPConfiguration *config, @@ -415,14 +442,26 @@ static void check_parse(MachineState *ms, const SMPConfiguration *config, bool is_valid) { g_autofree char *config_str = smp_config_to_string(config); - g_autofree char *expect_topo_str = cpu_topology_to_string(expect_topo); - g_autofree char *output_topo_str = NULL; + g_autofree char *expect_topo_str = NULL, *output_topo_str = NULL; + unsigned int expect_threads_per_socket, expect_cores_per_socket; + unsigned int ms_threads_per_socket, ms_cores_per_socket; Error *err = NULL; + expect_threads_per_socket = + cpu_topology_get_threads_per_socket(expect_topo); + expect_cores_per_socket = + cpu_topology_get_cores_per_socket(expect_topo); + expect_topo_str = cpu_topology_to_string(expect_topo, + expect_threads_per_socket, + expect_cores_per_socket); + /* call the generic parser */ machine_parse_smp_config(ms, config, &err); - output_topo_str = cpu_topology_to_string(&ms->smp); + ms_threads_per_socket = machine_topo_get_threads_per_socket(ms); + ms_cores_per_socket = machine_topo_get_cores_per_socket(ms); + output_topo_str = cpu_topology_to_string(&ms->smp, ms_threads_per_socket, + ms_cores_per_socket); /* when the configuration is supposed to be valid */ if (is_valid) { @@ -433,7 +472,9 @@ static void check_parse(MachineState *ms, const SMPConfiguration *config, (ms->smp.clusters == expect_topo->clusters) && (ms->smp.cores == expect_topo->cores) && (ms->smp.threads == expect_topo->threads) && - (ms->smp.max_cpus == expect_topo->max_cpus)) { + (ms->smp.max_cpus == expect_topo->max_cpus) && + (ms_threads_per_socket == expect_threads_per_socket) && + (ms_cores_per_socket == expect_cores_per_socket)) { return; } diff --git a/tests/unit/test-x86-cpuid.c b/tests/unit/test-x86-topo.c index bfabc0403a..2b104f86d7 100644 --- a/tests/unit/test-x86-cpuid.c +++ b/tests/unit/test-x86-topo.c @@ -1,5 +1,5 @@ /* - * Test code for x86 CPUID and Topology functions + * Test code for x86 APIC ID and Topology functions * * Copyright (c) 2012 Red Hat Inc. * diff --git a/tests/vm/ubuntu.aarch64 b/tests/vm/ubuntu.aarch64 index 666947393b..eeda281f87 100755 --- a/tests/vm/ubuntu.aarch64 +++ b/tests/vm/ubuntu.aarch64 @@ -25,7 +25,7 @@ DEFAULT_CONFIG = { "apt-get install -y libfdt-dev pkg-config language-pack-en ninja-build", # We increase beyond the default time since during boot # it can take some time (many seconds) to log into the VM - # especially using softmmu. + # especially using TCG. 
'ssh_timeout' : 60, } diff --git a/ui/cocoa.m b/ui/cocoa.m index d95276013c..cd069da696 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -1247,7 +1247,6 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven [normalWindow makeKeyAndOrderFront:self]; [normalWindow center]; [normalWindow setDelegate: self]; - stretch_video = false; /* Used for displaying pause on the screen */ pauseLabel = [NSTextField new]; @@ -1671,7 +1670,9 @@ static void create_initial_menus(void) // View menu menu = [[NSMenu alloc] initWithTitle:@"View"]; [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Enter Fullscreen" action:@selector(doToggleFullScreen:) keyEquivalent:@"f"] autorelease]]; // Fullscreen - [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Zoom To Fit" action:@selector(zoomToFit:) keyEquivalent:@""] autorelease]]; + menuItem = [[[NSMenuItem alloc] initWithTitle:@"Zoom To Fit" action:@selector(zoomToFit:) keyEquivalent:@""] autorelease]; + [menuItem setState: stretch_video ? NSControlStateValueOn : NSControlStateValueOff]; + [menu addItem: menuItem]; menuItem = [[[NSMenuItem alloc] initWithTitle:@"View" action:nil keyEquivalent:@""] autorelease]; [menuItem setSubmenu:menu]; [[NSApp mainMenu] addItem:menuItem]; @@ -2041,18 +2042,6 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) [QemuApplication sharedApplication]; - create_initial_menus(); - - /* - * Create the menu entries which depend on QEMU state (for consoles - * and removable devices). These make calls back into QEMU functions, - * which is OK because at this point we know that the second thread - * holds the iothread lock and is synchronously waiting for us to - * finish. - */ - add_console_menu_entries(); - addRemovableDevicesMenuItems(); - // Create an Application controller QemuCocoaAppController *controller = [[QemuCocoaAppController alloc] init]; [NSApp setDelegate:controller]; @@ -2077,6 +2066,21 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) left_command_key_enabled = 0; } + if (opts->u.cocoa.has_zoom_to_fit && opts->u.cocoa.zoom_to_fit) { + stretch_video = true; + } + + create_initial_menus(); + /* + * Create the menu entries which depend on QEMU state (for consoles + * and removable devices). These make calls back into QEMU functions, + * which is OK because at this point we know that the second thread + * holds the iothread lock and is synchronously waiting for us to + * finish. 
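 *
 * Creating the menus only after the display options have been applied
 * means stretch_video already reflects zoom-to-fit when the "Zoom To
 * Fit" item is built, so its checkmark state starts out correct.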
+ */ + add_console_menu_entries(); + addRemovableDevicesMenuItems(); + // register vga output callbacks register_displaychangelistener(&dcl); diff --git a/ui/console-vc-stubs.c b/ui/console-vc-stubs.c new file mode 100644 index 0000000000..2afc52329f --- /dev/null +++ b/ui/console-vc-stubs.c @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * QEMU VC stubs + */ +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "chardev/char.h" +#include "ui/console-priv.h" + +void qemu_text_console_select(QemuTextConsole *c) +{ +} + +const char * +qemu_text_console_get_label(QemuTextConsole *c) +{ + return NULL; +} + +void qemu_text_console_update_cursor(void) +{ +} + +void qemu_text_console_handle_keysym(QemuTextConsole *s, int keysym) +{ +} + +void qemu_console_early_init(void) +{ +} diff --git a/ui/console.c b/ui/console.c index 8ee66d10c5..8e688d3569 100644 --- a/ui/console.c +++ b/ui/console.c @@ -584,6 +584,7 @@ DisplaySurface *qemu_create_placeholder_surface(int w, int h, const char *msg) { DisplaySurface *surface = qemu_create_displaysurface(w, h); +#ifdef CONFIG_PIXMAN pixman_color_t bg = QEMU_PIXMAN_COLOR_BLACK; pixman_color_t fg = QEMU_PIXMAN_COLOR_GRAY; pixman_image_t *glyph; @@ -598,6 +599,7 @@ DisplaySurface *qemu_create_placeholder_surface(int w, int h, x+i, y, FONT_WIDTH, FONT_HEIGHT); qemu_pixman_image_unref(glyph); } +#endif surface->flags |= QEMU_PLACEHOLDER_FLAG; return surface; } @@ -1675,6 +1677,23 @@ void qemu_display_init(DisplayState *ds, DisplayOptions *opts) dpys[opts->type]->init(ds, opts); } +const char *qemu_display_get_vc(DisplayOptions *opts) +{ + assert(opts->type < DISPLAY_TYPE__MAX); + if (opts->type == DISPLAY_TYPE_NONE) { + return NULL; + } + assert(dpys[opts->type] != NULL); + if (dpys[opts->type]->vc) { + return dpys[opts->type]->vc; + } else { +#ifdef CONFIG_PIXMAN + return "vc:80Cx24C"; +#endif + } + return NULL; +} + void qemu_display_help(void) { int idx; diff --git a/ui/dbus-listener.c b/ui/dbus-listener.c index 36548a7f52..18f556aa73 100644 --- a/ui/dbus-listener.c +++ b/ui/dbus-listener.c @@ -26,9 +26,6 @@ #include "qapi/error.h" #include "sysemu/sysemu.h" #include "dbus.h" -#ifdef CONFIG_OPENGL -#include <pixman.h> -#endif #ifdef G_OS_UNIX #include <gio/gunixfdlist.h> #endif @@ -41,6 +38,7 @@ #include "ui/shader.h" #include "ui/egl-helpers.h" #include "ui/egl-context.h" +#include "ui/qemu-pixman.h" #endif #include "trace.h" @@ -62,9 +60,11 @@ struct _DBusDisplayListener { QemuDBusDisplay1Listener *proxy; -#ifdef CONFIG_OPENGL +#ifdef CONFIG_PIXMAN /* Keep track of the damage region */ pixman_region32_t gl_damage; +#else + int gl_damage; #endif DisplayChangeListener dcl; @@ -545,6 +545,7 @@ static void dbus_gl_refresh(DisplayChangeListener *dcl) return; } +#ifdef CONFIG_PIXMAN int n_rects = pixman_region32_n_rects(&ddl->gl_damage); for (int i = 0; i < n_rects; i++) { @@ -555,6 +556,13 @@ static void dbus_gl_refresh(DisplayChangeListener *dcl) box->x2 - box->x1, box->y2 - box->y1); } pixman_region32_clear(&ddl->gl_damage); +#else + if (ddl->gl_damage) { + dbus_call_update_gl(dcl, 0, 0, + surface_width(ddl->ds), surface_height(ddl->ds)); + ddl->gl_damage = 0; + } +#endif } #endif /* OPENGL */ @@ -569,20 +577,64 @@ static void dbus_gl_gfx_update(DisplayChangeListener *dcl, { DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); +#ifdef CONFIG_PIXMAN pixman_region32_t rect_region; pixman_region32_init_rect(&rect_region, x, y, w, h); 
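    /* Accumulate only: the union below grows gl_damage until the next
     * dbus_gl_refresh() pass walks its rectangles and clears the region. */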
pixman_region32_union(&ddl->gl_damage, &ddl->gl_damage, &rect_region); pixman_region32_fini(&rect_region); +#else + ddl->gl_damage++; +#endif } #endif +static void dbus_gfx_update_sub(DBusDisplayListener *ddl, + int x, int y, int w, int h) +{ + pixman_image_t *img; + size_t stride; + GVariant *v_data; + + /* make a copy, since gvariant only handles linear data */ + stride = w * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(surface_format(ddl->ds)), 8); + img = pixman_image_create_bits(surface_format(ddl->ds), + w, h, NULL, stride); +#ifdef CONFIG_PIXMAN + pixman_image_composite(PIXMAN_OP_SRC, ddl->ds->image, NULL, img, + x, y, 0, 0, 0, 0, w, h); +#else + { + uint8_t *src = (uint8_t *)pixman_image_get_data(ddl->ds->image); + uint8_t *dst = (uint8_t *)pixman_image_get_data(img); + int bp = PIXMAN_FORMAT_BPP(surface_format(ddl->ds)) / 8; + int hh; + + for (hh = 0; hh < h; hh++) { + memcpy(&dst[stride * hh], + &src[surface_stride(ddl->ds) * (hh + y) + x * bp], + stride); + } + } +#endif + v_data = g_variant_new_from_data( + G_VARIANT_TYPE("ay"), + pixman_image_get_data(img), + pixman_image_get_stride(img) * h, + TRUE, + (GDestroyNotify)pixman_image_unref, + img); + qemu_dbus_display1_listener_call_update(ddl->proxy, + x, y, w, h, pixman_image_get_stride(img), pixman_image_get_format(img), + v_data, + G_DBUS_CALL_FLAGS_NONE, + DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL); +} + static void dbus_gfx_update(DisplayChangeListener *dcl, int x, int y, int w, int h) { DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl); - pixman_image_t *img; GVariant *v_data; - size_t stride; assert(ddl->ds); @@ -619,25 +671,7 @@ static void dbus_gfx_update(DisplayChangeListener *dcl, return; } - /* make a copy, since gvariant only handles linear data */ - stride = w * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(surface_format(ddl->ds)), 8); - img = pixman_image_create_bits(surface_format(ddl->ds), - w, h, NULL, stride); - pixman_image_composite(PIXMAN_OP_SRC, ddl->ds->image, NULL, img, - x, y, 0, 0, 0, 0, w, h); - - v_data = g_variant_new_from_data( - G_VARIANT_TYPE("ay"), - pixman_image_get_data(img), - pixman_image_get_stride(img) * h, - TRUE, - (GDestroyNotify)pixman_image_unref, - img); - qemu_dbus_display1_listener_call_update(ddl->proxy, - x, y, w, h, pixman_image_get_stride(img), pixman_image_get_format(img), - v_data, - G_DBUS_CALL_FLAGS_NONE, - DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL); + dbus_gfx_update_sub(ddl, x, y, w, h); } #ifdef CONFIG_OPENGL @@ -751,8 +785,10 @@ dbus_display_listener_dispose(GObject *object) g_clear_object(&ddl->map_proxy); g_clear_object(&ddl->d3d11_proxy); g_clear_pointer(&ddl->peer_process, CloseHandle); -#ifdef CONFIG_OPENGL +#ifdef CONFIG_PIXMAN pixman_region32_fini(&ddl->gl_damage); +#endif +#ifdef CONFIG_OPENGL egl_fb_destroy(&ddl->fb); #endif #endif @@ -787,7 +823,7 @@ dbus_display_listener_class_init(DBusDisplayListenerClass *klass) static void dbus_display_listener_init(DBusDisplayListener *ddl) { -#ifdef CONFIG_OPENGL +#ifdef CONFIG_PIXMAN pixman_region32_init(&ddl->gl_damage); #endif } diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c index a1060fd80f..cd2f176502 100644 --- a/ui/gtk-egl.c +++ b/ui/gtk-egl.c @@ -69,15 +69,16 @@ void gd_egl_draw(VirtualConsole *vc) #ifdef CONFIG_GBM QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; #endif - int ww, wh; + int ww, wh, ws; if (!vc->gfx.gls) { return; } window = gtk_widget_get_window(vc->gfx.drawing_area); - ww = gdk_window_get_width(window); - wh = gdk_window_get_height(window); + ws = gdk_window_get_scale_factor(window); + ww = 
gdk_window_get_width(window) * ws; + wh = gdk_window_get_height(window) * ws; if (vc->gfx.scanout_mode) { #ifdef CONFIG_GBM @@ -243,12 +244,19 @@ void gd_egl_scanout_texture(DisplayChangeListener *dcl, vc->gfx.h = h; vc->gfx.y0_top = backing_y_0_top; - eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, - vc->gfx.esurface, vc->gfx.ectx); + if (!vc->gfx.esurface) { + gd_egl_init(vc); + if (!vc->gfx.esurface) { + return; + } + + eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, + vc->gfx.esurface, vc->gfx.ectx); - gtk_egl_set_scanout_mode(vc, true); - egl_fb_setup_for_tex(&vc->gfx.guest_fb, backing_width, backing_height, - backing_id, false); + gtk_egl_set_scanout_mode(vc, true); + egl_fb_setup_for_tex(&vc->gfx.guest_fb, backing_width, backing_height, + backing_id, false); + } } void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl, @@ -312,7 +320,7 @@ void gd_egl_scanout_flush(DisplayChangeListener *dcl, { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); GdkWindow *window; - int ww, wh; + int ww, wh, ws; if (!vc->gfx.scanout_mode) { return; @@ -325,8 +333,9 @@ void gd_egl_scanout_flush(DisplayChangeListener *dcl, vc->gfx.esurface, vc->gfx.ectx); window = gtk_widget_get_window(vc->gfx.drawing_area); - ww = gdk_window_get_width(window); - wh = gdk_window_get_height(window); + ws = gdk_window_get_scale_factor(window); + ww = gdk_window_get_width(window) * ws; + wh = gdk_window_get_height(window) * ws; egl_fb_setup_default(&vc->gfx.win_fb, ww, wh); if (vc->gfx.cursor_fb.texture) { egl_texture_blit(vc->gfx.gls, &vc->gfx.win_fb, &vc->gfx.guest_fb, @@ -1400,7 +1400,7 @@ static void gd_menu_untabify(GtkMenuItem *item, void *opaque) eglDestroySurface(qemu_egl_display, vc->gfx.esurface); vc->gfx.esurface = NULL; } - if (vc->gfx.esurface) { + if (vc->gfx.ectx) { eglDestroyContext(qemu_egl_display, vc->gfx.ectx); vc->gfx.ectx = NULL; } @@ -2371,6 +2371,7 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) GdkDisplay *window_display; GtkIconTheme *theme; char *dir; + int idx; if (!gtkinit) { fprintf(stderr, "gtk initialization failed\n"); @@ -2434,6 +2435,15 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) gtk_container_add(GTK_CONTAINER(s->window), s->vbox); gtk_widget_show_all(s->window); + + for (idx = 0;; idx++) { + QemuConsole *con = qemu_console_lookup_by_index(idx); + if (!con) { + break; + } + gtk_widget_realize(s->vc[idx].gfx.drawing_area); + } + if (opts->u.gtk.has_show_menubar && !opts->u.gtk.show_menubar) { gtk_widget_hide(s->menu_bar); diff --git a/ui/meson.build b/ui/meson.build index 0a1e8272a3..0ccb3387ee 100644 --- a/ui/meson.build +++ b/ui/meson.build @@ -6,7 +6,6 @@ system_ss.add(png) system_ss.add(files( 'clipboard.c', 'console.c', - 'console-vc.c', 'cursor.c', 'input-keymap.c', 'input-legacy.c', @@ -19,6 +18,7 @@ system_ss.add(files( 'ui-qmp-cmds.c', 'util.c', )) +system_ss.add(when: pixman, if_true: files('console-vc.c'), if_false: files('console-vc-stubs.c')) if dbus_display system_ss.add(files('dbus-module.c')) endif @@ -46,7 +46,7 @@ vnc_ss.add(files( )) vnc_ss.add(zlib, jpeg, gnutls) vnc_ss.add(when: sasl, if_true: files('vnc-auth-sasl.c')) -system_ss.add_all(when: vnc, if_true: vnc_ss) +system_ss.add_all(when: [vnc, pixman], if_true: vnc_ss) system_ss.add(when: vnc, if_false: files('vnc-stubs.c')) ui_modules = {} @@ -60,8 +60,8 @@ endif system_ss.add(opengl) if opengl.found() opengl_ss = ss.source_set() - opengl_ss.add(gbm) - opengl_ss.add(when: [opengl, pixman], + opengl_ss.add(gbm, pixman) + opengl_ss.add(when: 
[opengl], if_true: files('shader.c', 'console-gl.c', 'egl-helpers.c', 'egl-context.c')) ui_modules += {'opengl' : opengl_ss} endif @@ -93,7 +93,7 @@ if dbus_display '--generate-c-code', '@BASENAME@']) dbus_display1_lib = static_library('dbus-display1', dbus_display1, dependencies: gio) dbus_display1_dep = declare_dependency(link_with: dbus_display1_lib, include_directories: include_directories('.')) - dbus_ss.add(when: [gio, pixman, dbus_display1_dep], + dbus_ss.add(when: [gio, dbus_display1_dep], if_true: [files( 'dbus-chardev.c', 'dbus-clipboard.c', @@ -101,7 +101,7 @@ if dbus_display 'dbus-error.c', 'dbus-listener.c', 'dbus.c', - ), opengl, gbm]) + ), opengl, gbm, pixman]) ui_modules += {'dbus' : dbus_ss} endif @@ -141,12 +141,12 @@ if spice.found() 'spice-display.c' )) ui_modules += {'spice-core' : spice_core_ss} -endif -if spice.found() and gio.found() - spice_ss = ss.source_set() - spice_ss.add(spice, gio, pixman, files('spice-app.c')) - ui_modules += {'spice-app': spice_ss} + if gio.found() + spice_ss = ss.source_set() + spice_ss.add(spice, gio, pixman, files('spice-app.c')) + ui_modules += {'spice-app': spice_ss} + endif endif keymaps = [ diff --git a/ui/qemu-pixman.c b/ui/qemu-pixman.c index b43ec38bf0..5ca55dd199 100644 --- a/ui/qemu-pixman.c +++ b/ui/qemu-pixman.c @@ -145,6 +145,7 @@ int qemu_pixman_get_type(int rshift, int gshift, int bshift) return type; } +#ifdef CONFIG_PIXMAN pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf) { pixman_format_code_t format; @@ -158,6 +159,7 @@ pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf) } return format; } +#endif /* * Return true for known-good pixman conversions. @@ -186,6 +188,7 @@ bool qemu_pixman_check_format(DisplayChangeListener *dcl, } } +#ifdef CONFIG_PIXMAN pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format, int width) { @@ -211,6 +214,7 @@ pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format, NULL, pixman_image_get_stride(image)); } +#endif void qemu_pixman_image_unref(pixman_image_t *image) { @@ -220,6 +224,7 @@ void qemu_pixman_image_unref(pixman_image_t *image) pixman_image_unref(image); } +#ifdef CONFIG_PIXMAN pixman_image_t *qemu_pixman_glyph_from_vgafont(int height, const uint8_t *font, unsigned int ch) { @@ -262,3 +267,4 @@ void qemu_pixman_glyph_render(pixman_image_t *glyph, pixman_image_unref(ifg); pixman_image_unref(ibg); } +#endif /* CONFIG_PIXMAN */ @@ -172,11 +172,19 @@ static void sdl_update_caption(struct sdl2_console *scon) status = " [Stopped]"; } else if (gui_grab) { if (alt_grab) { +#ifdef CONFIG_DARWIN + status = " - Press ⌃⌥⇧G to exit grab"; +#else status = " - Press Ctrl-Alt-Shift-G to exit grab"; +#endif } else if (ctrl_grab) { status = " - Press Right-Ctrl-G to exit grab"; } else { +#ifdef CONFIG_DARWIN + status = " - Press ⌃⌥G to exit grab"; +#else status = " - Press Ctrl-Alt-G to exit grab"; +#endif } } diff --git a/ui/ui-hmp-cmds.c b/ui/ui-hmp-cmds.c index c671389473..26c8ced1f2 100644 --- a/ui/ui-hmp-cmds.c +++ b/ui/ui-hmp-cmds.c @@ -437,6 +437,7 @@ void sendkey_completion(ReadLineState *rs, int nb_args, const char *str) } } +#ifdef CONFIG_PIXMAN void coroutine_fn hmp_screendump(Monitor *mon, const QDict *qdict) { @@ -458,6 +459,7 @@ hmp_screendump(Monitor *mon, const QDict *qdict) end: hmp_handle_error(mon, err); } +#endif void hmp_client_migrate_info(Monitor *mon, const QDict *qdict) { diff --git a/ui/ui-qmp-cmds.c b/ui/ui-qmp-cmds.c index debc07d678..d772e1cb7f 100644 --- a/ui/ui-qmp-cmds.c +++ b/ui/ui-qmp-cmds.c @@ -212,6 +212,7 @@ void 
qmp_client_migrate_info(const char *protocol, const char *hostname, error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "protocol", "'spice'"); } +#ifdef CONFIG_PIXMAN #ifdef CONFIG_PNG /** * png_save: Take a screenshot as PNG */ @@ -391,3 +392,4 @@ qmp_screendump(const char *filename, const char *device, } } } +#endif /* CONFIG_PIXMAN */ diff --git a/ui/vnc-stubs.c b/ui/vnc-stubs.c index b4eb3ce718..a96bc86236 100644 --- a/ui/vnc-stubs.c +++ b/ui/vnc-stubs.c @@ -10,15 +10,3 @@ int vnc_display_pw_expire(const char *id, time_t expires) { return -ENODEV; }; -void vnc_parse(const char *str) -{ - if (strcmp(str, "none") == 0) { - return; - } - error_setg(&error_fatal, "VNC support is disabled"); -} -int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp) -{ - error_setg(errp, "VNC support is disabled"); - return -1; -}
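Taken together, the ui/ hunks make pixman a build-time option: precise,
region-based damage tracking and the image helpers compile only under
CONFIG_PIXMAN, with coarse fallbacks otherwise. A minimal sketch of that
pattern, modelled on the dbus-listener change above (ui_mark_dirty() and
its static damage state are hypothetical names, not from the tree):

#include "qemu/osdep.h"

#ifdef CONFIG_PIXMAN
#include "ui/qemu-pixman.h"

/* assumes pixman_region32_init(&damage) ran once at startup */
static pixman_region32_t damage;

static void ui_mark_dirty(int x, int y, int w, int h)
{
    pixman_region32_t r;

    pixman_region32_init_rect(&r, x, y, w, h);
    pixman_region32_union(&damage, &damage, &r);  /* exact dirty rects */
    pixman_region32_fini(&r);
}
#else
static int damage;                /* coarse counter when pixman is absent */

static void ui_mark_dirty(int x, int y, int w, int h)
{
    damage++;     /* the refresh path repaints the whole surface instead */
}
#endif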