55 files changed, 2620 insertions, 1446 deletions
@@ -69,6 +69,7 @@ Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com> # git author config, or had utf8/latin1 encoding issues. Aaron Lindsay <aaron@os.amperecomputing.com> Alexey Gerasimenko <x1917x@gmail.com> +Alex Chen <alex.chen@huawei.com> Alex Ivanov <void@aleksoft.net> Andreas Färber <afaerber@suse.de> Bandan Das <bsd@redhat.com> @@ -99,9 +100,11 @@ Gautham R. Shenoy <ego@in.ibm.com> Gautham R. Shenoy <ego@linux.vnet.ibm.com> Gonglei (Arei) <arei.gonglei@huawei.com> Guang Wang <wang.guang55@zte.com.cn> +Haibin Zhang <haibinzhang@tencent.com> Hailiang Zhang <zhang.zhanghailiang@huawei.com> Hanna Reitz <hreitz@redhat.com> <mreitz@redhat.com> Hervé Poussineau <hpoussin@reactos.org> +Hyman Huang <huangy81@chinatelecom.cn> Jakub Jermář <jakub@jermar.eu> Jakub Jermář <jakub.jermar@kernkonzept.com> Jean-Christophe Dubois <jcd@tribudubois.net> @@ -135,6 +138,7 @@ Nicholas Thomas <nick@bytemark.co.uk> Nikunj A Dadhania <nikunj@linux.vnet.ibm.com> Orit Wasserman <owasserm@redhat.com> Paolo Bonzini <pbonzini@redhat.com> +Pan Nengyuan <pannengyuan@huawei.com> Pavel Dovgaluk <dovgaluk@ispras.ru> Pavel Dovgaluk <pavel.dovgaluk@gmail.com> Pavel Dovgaluk <Pavel.Dovgaluk@ispras.ru> diff --git a/MAINTAINERS b/MAINTAINERS index 53b63df407..d3879aa3c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -262,8 +262,10 @@ F: hw/openrisc/ F: tests/tcg/openrisc/ PowerPC TCG CPUs -M: David Gibson <david@gibson.dropbear.id.au> -M: Greg Kurz <groug@kaod.org> +M: Cédric Le Goater <clg@kaod.org> +M: Daniel Henrique Barboza <danielhb413@gmail.com> +R: David Gibson <david@gibson.dropbear.id.au> +R: Greg Kurz <groug@kaod.org> L: qemu-ppc@nongnu.org S: Maintained F: target/ppc/ @@ -382,8 +384,10 @@ F: target/mips/kvm* F: target/mips/sysemu/ PPC KVM CPUs -M: David Gibson <david@gibson.dropbear.id.au> -M: Greg Kurz <groug@kaod.org> +M: Cédric Le Goater <clg@kaod.org> +M: Daniel Henrique Barboza <danielhb413@gmail.com> +R: David Gibson <david@gibson.dropbear.id.au> +R: Greg Kurz <groug@kaod.org> S: Maintained F: target/ppc/kvm.c @@ -1321,8 +1325,10 @@ F: include/hw/rtc/m48t59.h F: tests/avocado/ppc_prep_40p.py sPAPR -M: David Gibson <david@gibson.dropbear.id.au> -M: Greg Kurz <groug@kaod.org> +M: Cédric Le Goater <clg@kaod.org> +M: Daniel Henrique Barboza <danielhb413@gmail.com> +R: David Gibson <david@gibson.dropbear.id.au> +R: Greg Kurz <groug@kaod.org> L: qemu-ppc@nongnu.org S: Maintained F: hw/*/spapr* @@ -1382,6 +1388,8 @@ F: include/hw/pci-host/mv64361.h Virtual Open Firmware (VOF) M: Alexey Kardashevskiy <aik@ozlabs.ru> +R: Cédric Le Goater <clg@kaod.org> +R: Daniel Henrique Barboza <danielhb413@gmail.com> R: David Gibson <david@gibson.dropbear.id.au> R: Greg Kurz <groug@kaod.org> L: qemu-ppc@nongnu.org @@ -1 +1 @@ -6.1.50 +6.1.90 diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 56f9ad15ab..600031210d 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -239,6 +239,17 @@ single ``bitmap``, the new ``block-export-add`` uses a list of ``bitmaps``. Member ``values`` in return value elements with meta-type ``enum`` is deprecated. Use ``members`` instead. +``drive-backup`` (since 6.2) +'''''''''''''''''''''''''''' + +Use ``blockdev-backup`` in combination with ``blockdev-add`` instead. +This change primarily separates the creation/opening process of the backup +target with explicit, separate steps. 
``blockdev-backup`` uses mostly the +same arguments as ``drive-backup``, except the ``format`` and ``mode`` +options are removed in favor of using explicit ``blockdev-create`` and +``blockdev-add`` calls. See :doc:`/interop/live-block-operations` for +details. + System accelerators ------------------- @@ -370,9 +381,6 @@ The ``I7200`` guest CPU relies on the nanoMIPS ISA, which is deprecated (the ISA has never been upstreamed to a compiler toolchain). Therefore this CPU is also deprecated. -Related binaries ----------------- - Backwards compatibility ----------------------- diff --git a/docs/block-replication.txt b/docs/block-replication.txt index 108e9166a8..59eb2b33b3 100644 --- a/docs/block-replication.txt +++ b/docs/block-replication.txt @@ -79,7 +79,7 @@ Primary | || Secondary disk <--------- hidden-disk 5 <--------- || | | || | | || '-------------------------' - || drive-backup sync=none 6 + || blockdev-backup sync=none 6 1) The disk on the primary is represented by a block device with two children, providing replication between a primary disk and the host that @@ -101,7 +101,7 @@ should support bdrv_make_empty() and backing file. that is modified by the primary VM. It should also start as an empty disk, and the driver supports bdrv_make_empty() and backing file. -6) The drive-backup job (sync=none) is run to allow hidden-disk to buffer +6) The blockdev-backup job (sync=none) is run to allow hidden-disk to buffer any state that would otherwise be lost by the speculative write-through of the NBD server into the secondary disk. So before block replication, the primary disk and secondary disk should contain the same data. diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index 38f2d7aad3..a3b5473089 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -956,15 +956,16 @@ definition must have documentation. Definition documentation starts with a line naming the definition, followed by an optional overview, a description of each argument (for commands and events), member (for structs and unions), branch (for -alternates), or value (for enums), and finally optional tagged -sections. +alternates), or value (for enums), a description of each feature (if +any), and finally optional tagged sections. -Descriptions of arguments can span multiple lines. The description -text can start on the line following the '\@argname:', in which case it -must not be indented at all. It can also start on the same line as -the '\@argname:'. In this case if it spans multiple lines then second -and subsequent lines must be indented to line up with the first -character of the first line of the description:: +The description of an argument or feature 'name' starts with +'\@name:'. The description text can start on the line following the +'\@name:', in which case it must not be indented at all. It can also +start on the same line as the '\@name:'. In this case if it spans +multiple lines then second and subsequent lines must be indented to +line up with the first character of the first line of the +description:: # @argone: # This is a two line description @@ -986,6 +987,12 @@ The number of spaces between the ':' and the text is not significant. Extensions added after the definition was first released carry a '(since x.y.z)' comment. 
+The feature descriptions must be preceded by a line "Features:", like +this:: + + # Features: + # @feature: Description text + A tagged section starts with one of the following words: "Note:"/"Notes:", "Since:", "Example"/"Examples", "Returns:", "TODO:". The section ends with the start of a new section. @@ -1000,12 +1007,6 @@ multiline argument descriptions. A 'Since: x.y.z' tagged section lists the release that introduced the definition. -The text of a section can start on a new line, in -which case it must not be indented at all. It can also start -on the same line as the 'Note:', 'Returns:', etc tag. In this -case if it spans multiple lines then second and subsequent -lines must be indented to match the first. - An 'Example' or 'Examples' section is automatically rendered entirely as literal fixed-width text. In other sections, the text is formatted, and rST markup can be used. diff --git a/docs/interop/bitmaps.rst b/docs/interop/bitmaps.rst index 059ad67929..1de46febdc 100644 --- a/docs/interop/bitmaps.rst +++ b/docs/interop/bitmaps.rst @@ -539,12 +539,11 @@ other partial disk images on top of a base image to reconstruct a full backup from the point in time at which the incremental backup was issued. The "Push Model" here references the fact that QEMU is "pushing" the modified -blocks out to a destination. We will be using the `drive-backup -<qemu-qmp-ref.html#index-drive_002dbackup>`_ and `blockdev-backup -<qemu-qmp-ref.html#index-blockdev_002dbackup>`_ QMP commands to create both +blocks out to a destination. We will be using the `blockdev-backup +<qemu-qmp-ref.html#index-blockdev_002dbackup>`_ QMP command to create both full and incremental backups. -Both of these commands are jobs, which have their own QMP API for querying and +The command is a background job, which has its own QMP API for querying and management documented in `Background jobs <qemu-qmp-ref.html#Background-jobs>`_. @@ -557,6 +556,10 @@ create a new incremental backup chain attached to a drive. This example creates a new, full backup of "drive0" and accompanies it with a new, empty bitmap that records writes from this point in time forward. +The target can be created with the help of `blockdev-add +<qemu-qmp-ref.html#index-blockdev_002dadd>`_ or `blockdev-create +<qemu-qmp-ref.html#index-blockdev_002dcreate>`_ command. + .. note:: Any new writes that happen after this command is issued, even while the backup job runs, will be written locally and not to the backup destination. These writes will be recorded in the bitmap @@ -576,12 +579,11 @@ new, empty bitmap that records writes from this point in time forward. } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } } ] @@ -664,12 +666,11 @@ use a transaction to reset the bitmap while making a new full backup: } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.new_full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } } ] @@ -728,19 +729,35 @@ Example: First Incremental Backup $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. 
code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Issue an incremental backup command: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -785,20 +802,36 @@ Example: Second Incremental Backup $ qemu-img create -f qcow2 drive0.inc1.qcow2 \ -b drive0.inc0.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc1.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a new incremental backup command. The only difference here is that we have changed the target image below. .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc1.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -866,20 +899,36 @@ image: file for you, but you lose control over format options like compatibility and preallocation presets. +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc2.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a new incremental backup command. Apart from the new destination image, there is no difference from the last two examples. .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc2.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -930,6 +979,38 @@ point in time. $ qemu-img create -f qcow2 drive0.full.qcow2 64G $ qemu-img create -f qcow2 drive1.full.qcow2 64G +#. Add target block nodes: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.full.qcow2" + } + } + } + + <- { "return": {} } + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target1", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive1.full.qcow2" + } + } + } + + <- { "return": {} } + #. Create a full (anchor) backup for each drive, with accompanying bitmaps: .. code-block:: QMP @@ -953,21 +1034,19 @@ point in time. } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", - "target": "/path/to/drive1.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target1", + "sync": "full" } } ] @@ -1016,6 +1095,38 @@ point in time. $ qemu-img create -f qcow2 drive1.inc0.qcow2 \ -b drive1.full.qcow2 -F qcow2 +#. Add target block nodes: + + .. 
code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target1", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive1.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a multi-drive incremental push backup transaction: .. code-block:: QMP @@ -1025,25 +1136,21 @@ point in time. "arguments": { "actions": [ { - "type": "drive-backup", + "type": "blockev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }, ] @@ -1119,19 +1226,35 @@ described above. This example demonstrates the single-job failure case: $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Attempt to create an incremental backup via QMP: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -1164,6 +1287,19 @@ described above. This example demonstrates the single-job failure case: "event": "BLOCK_JOB_COMPLETED" } +#. Remove target node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-del", + "arguments": { + "node-name": "target0", + } + } + + <- { "return": {} } + #. Delete the failed image, and re-create it. .. code:: bash @@ -1172,20 +1308,36 @@ described above. This example demonstrates the single-job failure case: $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Retry the command after fixing the underlying problem, such as freeing up space on the backup volume: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -1210,7 +1362,8 @@ described above. This example demonstrates the single-job failure case: Example: Partial Transactional Failures ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -QMP commands like `drive-backup <qemu-qmp-ref.html#index-drive_002dbackup>`_ +QMP commands like `blockdev-backup +<qemu-qmp-ref.html#index-blockdev_002dbackup>`_ conceptually only start a job, and so transactions containing these commands may succeed even if the job it created later fails. This might have surprising interactions with notions of how a "transaction" ought to behave. 
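In practice, the success return for ``transaction`` only confirms that the backup jobs were created; each job's eventual outcome still has to be tracked on its own. One way to do that (a sketch only; the job shown and its byte counts are illustrative, and the output is abridged) is to poll the job list after the transaction returns, in addition to watching for the completion events shown below::

    -> { "execute": "query-block-jobs" }

    <- { "return": [ { "type": "backup", "device": "drive0",
                       "len": 67108864, "offset": 451584,
                       "busy": true, "paused": false,
                       "ready": false, "speed": 0 } ] }
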
@@ -1240,25 +1393,21 @@ and one succeeds: "arguments": { "actions": [ { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }] } @@ -1375,25 +1524,21 @@ applied: }, "actions": [ { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }] } diff --git a/docs/interop/live-block-operations.rst b/docs/interop/live-block-operations.rst index 814c29bbe1..39e62c9915 100644 --- a/docs/interop/live-block-operations.rst +++ b/docs/interop/live-block-operations.rst @@ -116,8 +116,8 @@ QEMU block layer supports. (3) ``drive-mirror`` (and ``blockdev-mirror``): Synchronize a running disk to another image. -(4) ``drive-backup`` (and ``blockdev-backup``): Point-in-time (live) copy - of a block device to a destination. +(4) ``blockdev-backup`` (and the deprecated ``drive-backup``): + Point-in-time (live) copy of a block device to a destination. .. _`Interacting with a QEMU instance`: @@ -555,13 +555,14 @@ Currently, there are four different kinds: (3) ``none`` -- Synchronize only the new writes from this point on. - .. note:: In the case of ``drive-backup`` (or ``blockdev-backup``), - the behavior of ``none`` synchronization mode is different. - Normally, a ``backup`` job consists of two parts: Anything - that is overwritten by the guest is first copied out to - the backup, and in the background the whole image is - copied from start to end. With ``sync=none``, it's only - the first part. + .. note:: In the case of ``blockdev-backup`` (or deprecated + ``drive-backup``), the behavior of ``none`` + synchronization mode is different. Normally, a + ``backup`` job consists of two parts: Anything that is + overwritten by the guest is first copied out to the + backup, and in the background the whole image is copied + from start to end. With ``sync=none``, it's only the + first part. (4) ``incremental`` -- Synchronize content that is described by the dirty bitmap @@ -928,19 +929,22 @@ Shutdown the guest, by issuing the ``quit`` QMP command:: } -Live disk backup --- ``drive-backup`` and ``blockdev-backup`` -------------------------------------------------------------- +Live disk backup --- ``blockdev-backup`` and the deprecated``drive-backup`` +--------------------------------------------------------------------------- -The ``drive-backup`` (and its newer equivalent ``blockdev-backup``) allows +The ``blockdev-backup`` (and the deprecated ``drive-backup``) allows you to create a point-in-time snapshot. -In this case, the point-in-time is when you *start* the ``drive-backup`` -(or its newer equivalent ``blockdev-backup``) command. +In this case, the point-in-time is when you *start* the +``blockdev-backup`` (or deprecated ``drive-backup``) command. 
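In outline, the newer invocation is a two-step sequence: first add the target node, then start the backup job against it. A minimal sketch (node name, filename, format and job id are illustrative only)::

    -> { "execute": "blockdev-add",
         "arguments": { "node-name": "target0", "driver": "qcow2",
                        "file": { "driver": "file",
                                  "filename": "full-backup.qcow2" } } }

    <- { "return": {} }

    -> { "execute": "blockdev-backup",
         "arguments": { "job-id": "job0", "device": "node-A",
                        "target": "target0", "sync": "full" } }

    <- { "return": {} }

The sections below walk through the deprecated ``drive-backup`` form first, and then describe how to move to ``blockdev-backup``.
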
QMP invocation for ``drive-backup`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Note that ``drive-backup`` command is deprecated since QEMU 6.2 and +will be removed in future. + Yet again, starting afresh with our example disk image chain:: [A] <-- [B] <-- [C] <-- [D] @@ -965,11 +969,22 @@ will be issued, indicating the live block device job operation has completed, and no further action is required. +Moving from the deprecated ``drive-backup`` to newer ``blockdev-backup`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``blockdev-backup`` differs from ``drive-backup`` in how you specify +the backup target. With ``blockdev-backup`` you can't specify filename +as a target. Instead you use ``node-name`` of existing block node, +which you may add by ``blockdev-add`` or ``blockdev-create`` commands. +Correspondingly, ``blockdev-backup`` doesn't have ``mode`` and +``format`` arguments which don't apply to an existing block node. See +following sections for details and examples. + + Notes on ``blockdev-backup`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``blockdev-backup`` command is equivalent in functionality to -``drive-backup``, except that it operates at node-level in a Block Driver +The ``blockdev-backup`` command operates at node-level in a Block Driver State (BDS) graph. E.g. the sequence of actions to create a point-in-time backup diff --git a/hmp-commands.hx b/hmp-commands.hx index 3a5aeba3fe..70a9136ac2 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -382,7 +382,7 @@ SRST ERST { - .name = "stop", + .name = "stop|s", .args_type = "", .params = "", .help = "stop emulation", @@ -390,7 +390,7 @@ ERST }, SRST -``stop`` +``stop`` or ``s`` Stop emulation. ERST diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c index 8365a18761..20b77885c1 100644 --- a/hw/char/goldfish_tty.c +++ b/hw/char/goldfish_tty.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish TTY * diff --git a/hw/display/macfb.c b/hw/display/macfb.c index 4b352eb89c..277d3e6633 100644 --- a/hw/display/macfb.c +++ b/hw/display/macfb.c @@ -440,21 +440,18 @@ static MacFbMode *macfb_find_mode(MacfbDisplayType display_type, static gchar *macfb_mode_list(void) { - gchar *list = NULL; - gchar *mode; + GString *list = g_string_new(""); MacFbMode *macfb_mode; int i; for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { macfb_mode = &macfb_mode_table[i]; - mode = g_strdup_printf(" %dx%dx%d\n", macfb_mode->width, + g_string_append_printf(list, " %dx%dx%d\n", macfb_mode->width, macfb_mode->height, macfb_mode->depth); - list = g_strconcat(mode, list, NULL); - g_free(mode); } - return list; + return g_string_free(list, FALSE); } @@ -643,7 +640,7 @@ static bool macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp) gchar *list; error_setg(errp, "unknown display mode: width %d, height %d, depth %d", s->width, s->height, s->depth); - list = macfb_mode_list(); + list = macfb_mode_list(); error_append_hint(errp, "Available modes:\n%s", list); g_free(list); diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c index e3b43a69f1..dfd53275f6 100644 --- a/hw/intc/goldfish_pic.c +++ b/hw/intc/goldfish_pic.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish PIC * diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c index 2133d2a698..0c515e4ecb 100644 --- a/hw/intc/m68k_irqc.c +++ b/hw/intc/m68k_irqc.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later 
+ * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Motorola 680x0 IRQ Controller * diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 4e8bce5aa6..0efa4a45c7 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Vitual M68K Machine * @@ -304,7 +304,21 @@ type_init(virt_machine_register_types) } \ type_init(machvirt_machine_##major##_##minor##_init); +static void virt_machine_6_2_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE(6, 2, true) + +static void virt_machine_6_1_options(MachineClass *mc) +{ + virt_machine_6_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); +} +DEFINE_VIRT_MACHINE(6, 1, false) + static void virt_machine_6_0_options(MachineClass *mc) { + virt_machine_6_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); } -DEFINE_VIRT_MACHINE(6, 0, true) +DEFINE_VIRT_MACHINE(6, 0, false) diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c index 3552d7a09a..e75d1e7e17 100644 --- a/hw/misc/virt_ctrl.c +++ b/hw/misc/virt_ctrl.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Virt system Controller */ diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index e427ac2fe0..298e6b93e2 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -23,6 +23,7 @@ #include "hw/qdev-properties.h" #include "sysemu/reset.h" #include "sysemu/runstate.h" +#include "sysemu/qtest.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/fw-path-provider.h" @@ -199,7 +200,7 @@ static void pegasos2_init(MachineState *machine) if (!pm->vof) { warn_report("Option -kernel may be ineffective with -bios."); } - } else if (pm->vof) { + } else if (pm->vof && !qtest_enabled()) { warn_report("Using Virtual OpenFirmware but no -kernel option."); } diff --git a/hw/ppc/pnv_pnor.c b/hw/ppc/pnv_pnor.c index 5ef1cf2afb..83ecccca28 100644 --- a/hw/ppc/pnv_pnor.c +++ b/hw/ppc/pnv_pnor.c @@ -36,7 +36,7 @@ static void pnv_pnor_update(PnvPnor *s, int offset, int size) int offset_end; int ret; - if (s->blk) { + if (!s->blk || !blk_is_writable(s->blk)) { return; } diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c index 5822938448..56ab2a5fb6 100644 --- a/hw/ppc/spapr_numa.c +++ b/hw/ppc/spapr_numa.c @@ -546,12 +546,24 @@ static void spapr_numa_FORM2_write_rtas_tables(SpaprMachineState *spapr, * NUMA nodes, but QEMU adds the default NUMA node without * adding the numa_info to retrieve distance info from. */ - if (src == dst) { - distance_table[i++] = NUMA_DISTANCE_MIN; - continue; + distance_table[i] = numa_info[src].distance[dst]; + if (distance_table[i] == 0) { + /* + * In case QEMU adds a default NUMA single node when the user + * did not add any, or where the user did not supply distances, + * the value will be 0 here. Populate the table with a fallback + * simple local / remote distance. 
+ */ + if (src == dst) { + distance_table[i] = NUMA_DISTANCE_MIN; + } else { + distance_table[i] = numa_info[src].distance[dst]; + if (distance_table[i] < NUMA_DISTANCE_MIN) { + distance_table[i] = NUMA_DISTANCE_DEFAULT; + } + } } - - distance_table[i++] = numa_info[src].distance[dst]; + i++; } } diff --git a/include/hw/char/goldfish_tty.h b/include/hw/char/goldfish_tty.h index b9dd67362a..7503d2fa1e 100644 --- a/include/hw/char/goldfish_tty.h +++ b/include/hw/char/goldfish_tty.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish TTY * diff --git a/include/hw/intc/goldfish_pic.h b/include/hw/intc/goldfish_pic.h index ad13ab37fc..e9d552f796 100644 --- a/include/hw/intc/goldfish_pic.h +++ b/include/hw/intc/goldfish_pic.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish PIC * diff --git a/include/hw/intc/m68k_irqc.h b/include/hw/intc/m68k_irqc.h index dbcfcfc2e0..ef91f21812 100644 --- a/include/hw/intc/m68k_irqc.h +++ b/include/hw/intc/m68k_irqc.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Motorola 680x0 IRQ Controller * diff --git a/include/hw/misc/virt_ctrl.h b/include/hw/misc/virt_ctrl.h index edfadc4695..25a237e518 100644 --- a/include/hw/misc/virt_ctrl.h +++ b/include/hw/misc/virt_ctrl.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Virt system Controller */ diff --git a/include/libdecnumber/decNumber.h b/include/libdecnumber/decNumber.h index aa115fed07..41bc2a0d36 100644 --- a/include/libdecnumber/decNumber.h +++ b/include/libdecnumber/decNumber.h @@ -116,12 +116,16 @@ decNumber * decNumberFromUInt32(decNumber *, uint32_t); decNumber *decNumberFromInt64(decNumber *, int64_t); decNumber *decNumberFromUInt64(decNumber *, uint64_t); + decNumber *decNumberFromInt128(decNumber *, uint64_t, int64_t); + decNumber *decNumberFromUInt128(decNumber *, uint64_t, uint64_t); decNumber * decNumberFromString(decNumber *, const char *, decContext *); char * decNumberToString(const decNumber *, char *); char * decNumberToEngString(const decNumber *, char *); uint32_t decNumberToUInt32(const decNumber *, decContext *); int32_t decNumberToInt32(const decNumber *, decContext *); int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set); + void decNumberIntegralToInt128(const decNumber *dn, decContext *set, + uint64_t *plow, uint64_t *phigh); uint8_t * decNumberGetBCD(const decNumber *, uint8_t *); decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t); diff --git a/include/libdecnumber/decNumberLocal.h b/include/libdecnumber/decNumberLocal.h index 4d53c077f2..6198ca8593 100644 --- a/include/libdecnumber/decNumberLocal.h +++ b/include/libdecnumber/decNumberLocal.h @@ -98,7 +98,7 @@ /* Shared lookup tables */ extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */ - extern const uLong DECPOWERS[19]; /* powers of ten table */ + extern const uLong DECPOWERS[20]; /* powers of ten table */ /* The following are included from decDPD.h */ extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */ extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */ diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index a3a7ced78d..ca979dc6cc 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -590,6 +590,42 @@ static inline bool umul64_overflow(uint64_t x, uint64_t y, 
uint64_t *ret) #endif } +/* + * Unsigned 128x64 multiplication. + * Returns true if the result got truncated to 128 bits. + * Otherwise, returns false and the multiplication result via plow and phigh. + */ +static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor) +{ +#if defined(CONFIG_INT128) && \ + (__has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5) + bool res; + __uint128_t r; + __uint128_t f = ((__uint128_t)*phigh << 64) | *plow; + res = __builtin_mul_overflow(f, factor, &r); + + *plow = r; + *phigh = r >> 64; + + return res; +#else + uint64_t dhi = *phigh; + uint64_t dlo = *plow; + uint64_t ahi; + uint64_t blo, bhi; + + if (dhi == 0) { + mulu64(plow, phigh, dlo, factor); + return false; + } + + mulu64(plow, &ahi, dlo, factor); + mulu64(&blo, &bhi, dhi, factor); + + return uadd64_overflow(ahi, blo, phigh) || bhi != 0; +#endif +} + /** * uadd64_carry - addition with carry-in and carry-out * @x, @y: addends diff --git a/include/qom/object.h b/include/qom/object.h index faae0d841f..fae096f51c 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -1544,6 +1544,18 @@ Object *object_resolve_path_type(const char *path, const char *typename, bool *ambiguous); /** + * object_resolve_path_at: + * @parent: the object in which to resolve the path + * @path: the path to resolve + * + * This is like object_resolve_path(), except paths not starting with + * a slash are relative to @parent. + * + * Returns: The resolved object or NULL on path lookup failure. + */ +Object *object_resolve_path_at(Object *parent, const char *path); + +/** * object_resolve_path_component: * @parent: the object in which to resolve the path * @part: the component to resolve. diff --git a/libdecnumber/decContext.c b/libdecnumber/decContext.c index 7d97a65ac5..1956edf0a7 100644 --- a/libdecnumber/decContext.c +++ b/libdecnumber/decContext.c @@ -53,12 +53,13 @@ static const Flag *mfctop=(Flag *)&mfcone; /* -> top byte */ const uByte DECSTICKYTAB[10]={1,1,2,3,4,6,6,7,8,9}; /* used if sticky */ /* ------------------------------------------------------------------ */ -/* Powers of ten (powers[n]==10**n, 0<=n<=9) */ +/* Powers of ten (powers[n]==10**n, 0<=n<=19) */ /* ------------------------------------------------------------------ */ -const uLong DECPOWERS[19] = {1, 10, 100, 1000, 10000, 100000, 1000000, +const uLong DECPOWERS[20] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000ULL, 100000000000ULL, 1000000000000ULL, 10000000000000ULL, 100000000000000ULL, 1000000000000000ULL, - 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, }; + 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, + 10000000000000000000ULL,}; /* ------------------------------------------------------------------ */ /* decContextClearStatus -- clear bits in current status */ diff --git a/libdecnumber/decNumber.c b/libdecnumber/decNumber.c index 1ffe458ad8..31282adafd 100644 --- a/libdecnumber/decNumber.c +++ b/libdecnumber/decNumber.c @@ -167,6 +167,7 @@ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" +#include "qemu/host-utils.h" #include "libdecnumber/dconfig.h" #include "libdecnumber/decNumber.h" #include "libdecnumber/decNumberLocal.h" @@ -263,6 +264,7 @@ static decNumber * decTrim(decNumber *, decContext *, Flag, Int *); static Int decUnitAddSub(const Unit *, Int, const Unit *, Int, Int, Unit *, Int); static Int decUnitCompare(const Unit *, Int, const Unit *, Int, Int); +static bool 
mulUInt128ByPowOf10(uLong *, uLong *, uInt); #if !DECSUBSET /* decFinish == decFinalize when no subset arithmetic needed */ @@ -462,6 +464,41 @@ decNumber *decNumberFromUInt64(decNumber *dn, uint64_t uin) return dn; } /* decNumberFromUInt64 */ +decNumber *decNumberFromInt128(decNumber *dn, uint64_t lo, int64_t hi) +{ + uint64_t unsig_hi = hi; + if (hi < 0) { + if (lo == 0) { + unsig_hi = -unsig_hi; + } else { + unsig_hi = ~unsig_hi; + lo = -lo; + } + } + + decNumberFromUInt128(dn, lo, unsig_hi); + if (hi < 0) { + dn->bits = DECNEG; /* sign needed */ + } + return dn; +} /* decNumberFromInt128 */ + +decNumber *decNumberFromUInt128(decNumber *dn, uint64_t lo, uint64_t hi) +{ + uint64_t rem; + Unit *up; /* work pointer */ + decNumberZero(dn); /* clean */ + if (lo == 0 && hi == 0) { + return dn; /* [or decGetDigits bad call] */ + } + for (up = dn->lsu; hi > 0 || lo > 0; up++) { + rem = divu128(&lo, &hi, DECDPUNMAX + 1); + *up = (Unit)rem; + } + dn->digits = decGetDigits(dn->lsu, up - dn->lsu); + return dn; +} /* decNumberFromUInt128 */ + /* ------------------------------------------------------------------ */ /* to-int64 -- conversion to int64 */ /* */ @@ -506,6 +543,68 @@ Invalid: return 0; } /* decNumberIntegralToInt64 */ +/* ------------------------------------------------------------------ */ +/* decNumberIntegralToInt128 -- conversion to int128 */ +/* */ +/* dn is the decNumber to convert. dn is assumed to have been */ +/* rounded to a floating point integer value. */ +/* set is the context for reporting errors */ +/* returns the converted decNumber via plow and phigh */ +/* */ +/* Invalid is set if the decNumber is a NaN, Infinite or is out of */ +/* range for a signed 128 bit integer. */ +/* ------------------------------------------------------------------ */ + +void decNumberIntegralToInt128(const decNumber *dn, decContext *set, + uint64_t *plow, uint64_t *phigh) +{ + int d; /* work */ + const Unit *up; /* .. */ + uint64_t lo = 0, hi = 0; + + if (decNumberIsSpecial(dn) || (dn->exponent < 0) || + (dn->digits + dn->exponent > 39)) { + goto Invalid; + } + + up = dn->lsu; /* -> lsu */ + + for (d = (dn->digits - 1) / DECDPUN; d >= 0; d--) { + if (mulu128(&lo, &hi, DECDPUNMAX + 1)) { + /* overflow */ + goto Invalid; + } + if (uadd64_overflow(lo, up[d], &lo)) { + if (uadd64_overflow(hi, 1, &hi)) { + /* overflow */ + goto Invalid; + } + } + } + + if (mulUInt128ByPowOf10(&lo, &hi, dn->exponent)) { + /* overflow */ + goto Invalid; + } + + if (decNumberIsNegative(dn)) { + if (lo == 0) { + *phigh = -hi; + *plow = 0; + } else { + *phigh = ~hi; + *plow = -lo; + } + } else { + *plow = lo; + *phigh = hi; + } + + return; + +Invalid: + decContextSetStatus(set, DEC_Invalid_operation); +} /* decNumberIntegralToInt128 */ /* ------------------------------------------------------------------ */ /* to-scientific-string -- conversion to numeric string */ @@ -7849,6 +7948,38 @@ static Int decGetDigits(Unit *uar, Int len) { return digits; } /* decGetDigits */ +/* ------------------------------------------------------------------ */ +/* mulUInt128ByPowOf10 -- multiply a 128-bit unsigned integer by a */ +/* power of 10. */ +/* */ +/* The 128-bit factor composed of plow and phigh is multiplied */ +/* by 10^exp. 
*/ +/* */ +/* plow pointer to the low 64 bits of the first factor */ +/* phigh pointer to the high 64 bits of the first factor */ +/* exp the exponent of the power of 10 of the second factor */ +/* */ +/* If the result fits in 128 bits, returns false and the */ +/* multiplication result through plow and phigh. */ +/* Otherwise, returns true. */ +/* ------------------------------------------------------------------ */ +static bool mulUInt128ByPowOf10(uLong *plow, uLong *phigh, uInt pow10) +{ + while (pow10 >= ARRAY_SIZE(powers)) { + if (mulu128(plow, phigh, powers[ARRAY_SIZE(powers) - 1])) { + /* Overflow */ + return true; + } + pow10 -= ARRAY_SIZE(powers) - 1; + } + + if (pow10 > 0) { + return mulu128(plow, phigh, powers[pow10]); + } else { + return false; + } +} + #if DECTRACE | DECCHECK /* ------------------------------------------------------------------ */ /* decNumberShow -- display a number [debug aid] */ diff --git a/meson.build b/meson.build index 6bfed294d0..9702fdce6d 100644 --- a/meson.build +++ b/meson.build @@ -340,7 +340,7 @@ if not get_option('tcg').disabled() error('Unsupported CPU @0@, try --enable-tcg-interpreter'.format(cpu)) endif elif get_option('tcg_interpreter') - warning('Use of the TCG interpretor is not recommended on this host') + warning('Use of the TCG interpreter is not recommended on this host') warning('architecture. There is a native TCG execution backend available') warning('which provides substantially better performance and reliability.') warning('It is strongly recommended to remove the --enable-tcg-interpreter') diff --git a/migration/colo.c b/migration/colo.c index e3b1f136f4..2415325262 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -459,6 +459,10 @@ static int colo_do_checkpoint_transaction(MigrationState *s, if (ret < 0) { goto out; } + + if (migrate_auto_converge()) { + mig_throttle_counter_reset(); + } /* * Only save VM's live state, which not including device state. 
* TODO: We may need a timeout mechanism to prevent COLO process diff --git a/migration/ram.c b/migration/ram.c index 847af461f2..863035d235 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -641,6 +641,15 @@ static void mig_throttle_guest_down(uint64_t bytes_dirty_period, } } +void mig_throttle_counter_reset(void) +{ + RAMState *rs = ram_state; + + rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + rs->num_dirty_pages_period = 0; + rs->bytes_xfer_prev = ram_counters.transferred; +} + /** * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache * @@ -836,6 +845,41 @@ migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb, } } +/* + * colo_bitmap_find_diry:find contiguous dirty pages from start + * + * Returns the page offset within memory region of the start of the contiguout + * dirty page + * + * @rs: current RAM state + * @rb: RAMBlock where to search for dirty pages + * @start: page where we start the search + * @num: the number of contiguous dirty pages + */ +static inline +unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, + unsigned long start, unsigned long *num) +{ + unsigned long size = rb->used_length >> TARGET_PAGE_BITS; + unsigned long *bitmap = rb->bmap; + unsigned long first, next; + + *num = 0; + + if (ramblock_is_ignored(rb)) { + return size; + } + + first = find_next_bit(bitmap, size, start); + if (first >= size) { + return first; + } + next = find_next_zero_bit(bitmap, size, first + 1); + assert(next >= first); + *num = next - first; + return first; +} + static inline bool migration_bitmap_clear_dirty(RAMState *rs, RAMBlock *rb, unsigned long page) @@ -3886,19 +3930,26 @@ void colo_flush_ram_cache(void) block = QLIST_FIRST_RCU(&ram_list.blocks); while (block) { - offset = migration_bitmap_find_dirty(ram_state, block, offset); + unsigned long num = 0; + offset = colo_bitmap_find_dirty(ram_state, block, offset, &num); if (!offset_in_ramblock(block, ((ram_addr_t)offset) << TARGET_PAGE_BITS)) { offset = 0; + num = 0; block = QLIST_NEXT_RCU(block, next); } else { - migration_bitmap_clear_dirty(ram_state, block, offset); + unsigned long i = 0; + + for (i = 0; i < num; i++) { + migration_bitmap_clear_dirty(ram_state, block, offset + i); + } dst_host = block->host + (((ram_addr_t)offset) << TARGET_PAGE_BITS); src_host = block->colo_cache + (((ram_addr_t)offset) << TARGET_PAGE_BITS); - memcpy(dst_host, src_host, TARGET_PAGE_SIZE); + memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num); + offset += num; } } } diff --git a/migration/ram.h b/migration/ram.h index dda1988f3d..c515396a9a 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -50,6 +50,7 @@ bool ramblock_is_ignored(RAMBlock *block); int xbzrle_cache_resize(uint64_t new_size, Error **errp); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_total(void); +void mig_throttle_counter_reset(void); uint64_t ram_pagesize_summary(void); int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len); diff --git a/qapi/block-core.json b/qapi/block-core.json index 33e8507d10..1d3dd9cb48 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -1709,6 +1709,9 @@ # The operation can be stopped before it has completed using the # block-job-cancel command. # +# Features: +# @deprecated: This command is deprecated. Use @blockdev-backup instead. 
+# # Returns: - nothing on success # - If @device is not a valid block device, GenericError # @@ -1724,7 +1727,7 @@ # ## { 'command': 'drive-backup', 'boxed': true, - 'data': 'DriveBackup' } + 'data': 'DriveBackup', 'features': ['deprecated'] } ## # @blockdev-backup: diff --git a/qapi/machine.json b/qapi/machine.json index 17794ef681..067e3f5378 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -1417,107 +1417,143 @@ # # Query interrupt statistics # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: interrupt statistics # # Since: 6.2 ## { 'command': 'x-query-irq', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } ## # @x-query-jit: # # Query TCG compiler statistics # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: TCG compiler statistics # # Since: 6.2 ## { 'command': 'x-query-jit', 'returns': 'HumanReadableText', - 'if': 'CONFIG_TCG' } + 'if': 'CONFIG_TCG', + 'features': [ 'unstable' ] } ## # @x-query-numa: # # Query NUMA topology information # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: topology information # # Since: 6.2 ## { 'command': 'x-query-numa', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } ## # @x-query-opcount: # # Query TCG opcode counters # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: TCG opcode counters # # Since: 6.2 ## { 'command': 'x-query-opcount', 'returns': 'HumanReadableText', - 'if': 'CONFIG_TCG' } + 'if': 'CONFIG_TCG', + 'features': [ 'unstable' ] } ## # @x-query-profile: # # Query TCG profiling information # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: profile information # # Since: 6.2 ## { 'command': 'x-query-profile', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } ## # @x-query-ramblock: # # Query system ramblock information # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: system ramblock information # # Since: 6.2 ## { 'command': 'x-query-ramblock', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } ## # @x-query-rdma: # # Query RDMA state # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: RDMA state # # Since: 6.2 ## { 'command': 'x-query-rdma', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } ## # @x-query-roms: # # Query information on the registered ROMS # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: registered ROMs # # Since: 6.2 ## { 'command': 'x-query-roms', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } ## # @x-query-usb: # # Query information on the USB devices # +# Features: +# @unstable: This command is meant for debugging. 
+# # Returns: USB device information # # Since: 6.2 ## { 'command': 'x-query-usb', - 'returns': 'HumanReadableText' } + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } diff --git a/qapi/migration.json b/qapi/migration.json index f0aefdab64..bbfd48cf0b 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -1796,7 +1796,7 @@ # @calc-time: time in units of second for sample dirty pages # # @sample-pages: page count per GB for sample dirty pages -# the default value is 512 (since 6.2) +# the default value is 512 (since 6.1) # # @mode: mode containing method of calculate dirtyrate includes # 'page-sampling' and 'dirty-ring' (Since 6.2) diff --git a/qapi/transaction.json b/qapi/transaction.json index d175b5f863..381a2df782 100644 --- a/qapi/transaction.json +++ b/qapi/transaction.json @@ -54,6 +54,10 @@ # @blockdev-snapshot-sync: since 1.1 # @drive-backup: Since 1.6 # +# Features: +# @deprecated: Member @drive-backup is deprecated. Use member +# @blockdev-backup instead. +# # Since: 1.1 ## { 'enum': 'TransactionActionKind', @@ -62,7 +66,7 @@ 'block-dirty-bitmap-disable', 'block-dirty-bitmap-merge', 'blockdev-backup', 'blockdev-snapshot', 'blockdev-snapshot-internal-sync', 'blockdev-snapshot-sync', - 'drive-backup' ] } + { 'name': 'drive-backup', 'features': [ 'deprecated' ] } ] } ## # @AbortWrapper: diff --git a/qom/object.c b/qom/object.c index 6be710bc40..4f0677cca9 100644 --- a/qom/object.c +++ b/qom/object.c @@ -2144,6 +2144,17 @@ Object *object_resolve_path(const char *path, bool *ambiguous) return object_resolve_path_type(path, TYPE_OBJECT, ambiguous); } +Object *object_resolve_path_at(Object *parent, const char *path) +{ + g_auto(GStrv) parts = g_strsplit(path, "/", 0); + + if (*path == '/') { + return object_resolve_abs_path(object_get_root(), parts + 1, + TYPE_OBJECT); + } + return object_resolve_abs_path(parent, parts, TYPE_OBJECT); +} + typedef struct StringProperty { char *(*get)(Object *, Error **); diff --git a/softmmu/qdev-monitor.c b/softmmu/qdev-monitor.c index f8b3a4cd82..b5aaae4b8c 100644 --- a/softmmu/qdev-monitor.c +++ b/softmmu/qdev-monitor.c @@ -871,15 +871,9 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp) static DeviceState *find_device_state(const char *id, Error **errp) { - Object *obj; + Object *obj = object_resolve_path_at(qdev_get_peripheral(), id); DeviceState *dev; - if (id[0] == '/') { - obj = object_resolve_path(id, NULL); - } else { - obj = object_resolve_path_component(qdev_get_peripheral(), id); - } - if (!obj) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found", id); diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c index 07341a69f5..0d01ac3de0 100644 --- a/target/ppc/dfp_helper.c +++ b/target/ppc/dfp_helper.c @@ -51,6 +51,11 @@ static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src) dfp[1].VsrD(0) = src->VsrD(1); } +static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src) +{ + *dst = *src; +} + struct PPC_DFP { CPUPPCState *env; ppc_vsr_t vt, va, vb; @@ -440,8 +445,8 @@ static void ADD_PPs(struct PPC_DFP *dfp) dfp_check_for_VXISI_add(dfp); } -DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64) -DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128) +DFP_HELPER_TAB(DADD, decNumberAdd, ADD_PPs, 64) +DFP_HELPER_TAB(DADDQ, decNumberAdd, ADD_PPs, 128) static void SUB_PPs(struct PPC_DFP *dfp) { @@ -453,8 +458,8 @@ static void SUB_PPs(struct PPC_DFP *dfp) dfp_check_for_VXISI_subtract(dfp); } -DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64) -DFP_HELPER_TAB(dsubq, decNumberSubtract, 
SUB_PPs, 128) +DFP_HELPER_TAB(DSUB, decNumberSubtract, SUB_PPs, 64) +DFP_HELPER_TAB(DSUBQ, decNumberSubtract, SUB_PPs, 128) static void MUL_PPs(struct PPC_DFP *dfp) { @@ -466,8 +471,8 @@ static void MUL_PPs(struct PPC_DFP *dfp) dfp_check_for_VXIMZ(dfp); } -DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64) -DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128) +DFP_HELPER_TAB(DMUL, decNumberMultiply, MUL_PPs, 64) +DFP_HELPER_TAB(DMULQ, decNumberMultiply, MUL_PPs, 128) static void DIV_PPs(struct PPC_DFP *dfp) { @@ -481,8 +486,8 @@ static void DIV_PPs(struct PPC_DFP *dfp) dfp_check_for_VXIDI(dfp); } -DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64) -DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128) +DFP_HELPER_TAB(DDIV, decNumberDivide, DIV_PPs, 64) +DFP_HELPER_TAB(DDIVQ, decNumberDivide, DIV_PPs, 128) #define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -502,8 +507,8 @@ static void CMPU_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64) -DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128) +DFP_HELPER_BF_AB(DCMPU, decNumberCompare, CMPU_PPs, 64) +DFP_HELPER_BF_AB(DCMPUQ, decNumberCompare, CMPU_PPs, 128) static void CMPO_PPs(struct PPC_DFP *dfp) { @@ -513,8 +518,8 @@ static void CMPO_PPs(struct PPC_DFP *dfp) dfp_check_for_VXVC(dfp); } -DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64) -DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128) +DFP_HELPER_BF_AB(DCMPO, decNumberCompare, CMPO_PPs, 64) +DFP_HELPER_BF_AB(DCMPOQ, decNumberCompare, CMPO_PPs, 128) #define DFP_HELPER_TSTDC(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ @@ -541,8 +546,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ return dfp.crbf; \ } -DFP_HELPER_TSTDC(dtstdc, 64) -DFP_HELPER_TSTDC(dtstdcq, 128) +DFP_HELPER_TSTDC(DTSTDC, 64) +DFP_HELPER_TSTDC(DTSTDCQ, 128) #define DFP_HELPER_TSTDG(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ @@ -596,8 +601,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ return dfp.crbf; \ } -DFP_HELPER_TSTDG(dtstdg, 64) -DFP_HELPER_TSTDG(dtstdgq, 128) +DFP_HELPER_TSTDG(DTSTDG, 64) +DFP_HELPER_TSTDG(DTSTDGQ, 128) #define DFP_HELPER_TSTEX(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -628,8 +633,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTEX(dtstex, 64) -DFP_HELPER_TSTEX(dtstexq, 128) +DFP_HELPER_TSTEX(DTSTEX, 64) +DFP_HELPER_TSTEX(DTSTEXQ, 128) #define DFP_HELPER_TSTSF(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -665,8 +670,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTSF(dtstsf, 64) -DFP_HELPER_TSTSF(dtstsfq, 128) +DFP_HELPER_TSTSF(DTSTSF, 64) +DFP_HELPER_TSTSF(DTSTSFQ, 128) #define DFP_HELPER_TSTSFI(op, size) \ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ @@ -700,8 +705,8 @@ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTSFI(dtstsfi, 64) -DFP_HELPER_TSTSFI(dtstsfiq, 128) +DFP_HELPER_TSTSFI(DTSTSFI, 64) +DFP_HELPER_TSTSFI(DTSTSFIQ, 128) static void QUA_PPs(struct PPC_DFP *dfp) { @@ -746,8 +751,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_QUAI(dquai, 64) 
-DFP_HELPER_QUAI(dquaiq, 128) +DFP_HELPER_QUAI(DQUAI, 64) +DFP_HELPER_QUAI(DQUAIQ, 128) #define DFP_HELPER_QUA(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ @@ -764,8 +769,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_QUA(dqua, 64) -DFP_HELPER_QUA(dquaq, 128) +DFP_HELPER_QUA(DQUA, 64) +DFP_HELPER_QUA(DQUAQ, 128) static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax, struct PPC_DFP *dfp) @@ -842,8 +847,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_RRND(drrnd, 64) -DFP_HELPER_RRND(drrndq, 128) +DFP_HELPER_RRND(DRRND, 64) +DFP_HELPER_RRND(DRRNDQ, 128) #define DFP_HELPER_RINT(op, postprocs, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ @@ -868,8 +873,8 @@ static void RINTX_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_RINT(drintx, RINTX_PPs, 64) -DFP_HELPER_RINT(drintxq, RINTX_PPs, 128) +DFP_HELPER_RINT(DRINTX, RINTX_PPs, 64) +DFP_HELPER_RINT(DRINTXQ, RINTX_PPs, 128) static void RINTN_PPs(struct PPC_DFP *dfp) { @@ -877,10 +882,10 @@ static void RINTN_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_RINT(drintn, RINTN_PPs, 64) -DFP_HELPER_RINT(drintnq, RINTN_PPs, 128) +DFP_HELPER_RINT(DRINTN, RINTN_PPs, 64) +DFP_HELPER_RINT(DRINTNQ, RINTN_PPs, 128) -void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DCTDP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; ppc_vsr_t vb; @@ -896,7 +901,7 @@ void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) dfp_set_FPRF_from_FRT(&dfp); } -void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DCTQPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; ppc_vsr_t vb; @@ -911,7 +916,7 @@ void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) set_dfp128(t, &dfp.vt); } -void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DRSP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; uint32_t t_short = 0; @@ -929,7 +934,7 @@ void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) set_dfp64(t, &vt); } -void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DRDPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; dfp_prepare_decimal128(&dfp, 0, b, env); @@ -967,8 +972,20 @@ static void CFFIX_PPs(struct PPC_DFP *dfp) dfp_check_for_XX(dfp); } -DFP_HELPER_CFFIX(dcffix, 64) -DFP_HELPER_CFFIX(dcffixq, 128) +DFP_HELPER_CFFIX(DCFFIX, 64) +DFP_HELPER_CFFIX(DCFFIXQ, 128) + +void helper_DCFFIXQQ(CPUPPCState *env, ppc_fprp_t *t, ppc_avr_t *b) +{ + struct PPC_DFP dfp; + + dfp_prepare_decimal128(&dfp, NULL, NULL, env); + decNumberFromInt128(&dfp.t, (uint64_t)b->VsrD(1), (int64_t)b->VsrD(0)); + dfp_finalize_decimal128(&dfp); + CFFIX_PPs(&dfp); + + set_dfp128(t, &dfp.vt); +} #define DFP_HELPER_CTFIX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ @@ -1005,8 +1022,55 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ set_dfp64(t, &dfp.vt); \ } -DFP_HELPER_CTFIX(dctfix, 64) -DFP_HELPER_CTFIX(dctfixq, 128) +DFP_HELPER_CTFIX(DCTFIX, 64) +DFP_HELPER_CTFIX(DCTFIXQ, 128) + +void helper_DCTFIXQQ(CPUPPCState *env, ppc_avr_t *t, ppc_fprp_t *b) +{ + struct PPC_DFP dfp; + dfp_prepare_decimal128(&dfp, 0, b, env); + + if (unlikely(decNumberIsSpecial(&dfp.b))) { + uint64_t invalid_flags = FP_VX | 
FP_VXCVI; + if (decNumberIsInfinite(&dfp.b)) { + if (decNumberIsNegative(&dfp.b)) { + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + } else { + dfp.vt.VsrD(0) = INT64_MAX; + dfp.vt.VsrD(1) = UINT64_MAX; + } + } else { /* NaN */ + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + if (decNumberIsSNaN(&dfp.b)) { + invalid_flags |= FP_VXSNAN; + } + } + dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); + } else if (unlikely(decNumberIsZero(&dfp.b))) { + dfp.vt.VsrD(0) = 0; + dfp.vt.VsrD(1) = 0; + } else { + decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); + decNumberIntegralToInt128(&dfp.b, &dfp.context, + &dfp.vt.VsrD(1), &dfp.vt.VsrD(0)); + if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { + if (decNumberIsNegative(&dfp.b)) { + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + } else { + dfp.vt.VsrD(0) = INT64_MAX; + dfp.vt.VsrD(1) = UINT64_MAX; + } + dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); + } else { + dfp_check_for_XX(&dfp); + } + } + + set_dfp128_to_avr(t, &dfp.vt); +} static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit, unsigned n) @@ -1067,8 +1131,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_DEDPD(ddedpd, 64) -DFP_HELPER_DEDPD(ddedpdq, 128) +DFP_HELPER_DEDPD(DDEDPD, 64) +DFP_HELPER_DEDPD(DDEDPDQ, 128) static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n) { @@ -1135,8 +1199,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_ENBCD(denbcd, 64) -DFP_HELPER_ENBCD(denbcdq, 128) +DFP_HELPER_ENBCD(DENBCD, 64) +DFP_HELPER_ENBCD(DENBCDQ, 128) #define DFP_HELPER_XEX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ @@ -1169,8 +1233,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ } \ } -DFP_HELPER_XEX(dxex, 64) -DFP_HELPER_XEX(dxexq, 128) +DFP_HELPER_XEX(DXEX, 64) +DFP_HELPER_XEX(DXEXQ, 128) static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw) { @@ -1235,8 +1299,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_IEX(diex, 64) -DFP_HELPER_IEX(diexq, 128) +DFP_HELPER_IEX(DIEX, 64) +DFP_HELPER_IEX(DIEXQ, 128) static void dfp_clear_lmd_from_g5msb(uint64_t *t) { @@ -1323,7 +1387,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_SHIFT(dscli, 64, 1) -DFP_HELPER_SHIFT(dscliq, 128, 1) -DFP_HELPER_SHIFT(dscri, 64, 0) -DFP_HELPER_SHIFT(dscriq, 128, 0) +DFP_HELPER_SHIFT(DSCLI, 64, 1) +DFP_HELPER_SHIFT(DSCLIQ, 128, 1) +DFP_HELPER_SHIFT(DSCRI, 64, 0) +DFP_HELPER_SHIFT(DSCRIQ, 128, 0) diff --git a/target/ppc/helper.h b/target/ppc/helper.h index 4076aa281e..627811cefc 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -46,7 +46,9 @@ DEF_HELPER_4(divwe, tl, env, tl, tl, i32) DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_3(sraw, tl, env, tl, tl) -DEF_HELPER_FLAGS_2(cfuged, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl) DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl) @@ -222,10 +224,10 @@ DEF_HELPER_3(vextractub, void, avr, avr, i32) DEF_HELPER_3(vextractuh, void, avr, avr, i32) 
DEF_HELPER_3(vextractuw, void, avr, avr, i32) DEF_HELPER_3(vextractd, void, avr, avr, i32) -DEF_HELPER_3(vinsertb, void, avr, avr, i32) -DEF_HELPER_3(vinserth, void, avr, avr, i32) -DEF_HELPER_3(vinsertw, void, avr, avr, i32) -DEF_HELPER_3(vinsertd, void, avr, avr, i32) +DEF_HELPER_4(VINSBLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSHLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSWLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSDLX, void, env, avr, i64, tl) DEF_HELPER_2(vextsb2w, void, avr, avr) DEF_HELPER_2(vextsh2w, void, avr, avr) DEF_HELPER_2(vextsb2d, void, avr, avr) @@ -332,6 +334,10 @@ DEF_HELPER_2(vextuwlx, tl, tl, avr) DEF_HELPER_2(vextubrx, tl, tl, avr) DEF_HELPER_2(vextuhrx, tl, tl, avr) DEF_HELPER_2(vextuwrx, tl, tl, avr) +DEF_HELPER_5(VEXTDUBVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDUHVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDUWVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDDVLX, void, env, avr, avr, avr, tl) DEF_HELPER_2(vsbox, void, avr, avr) DEF_HELPER_3(vcipher, void, avr, avr, avr) @@ -514,6 +520,10 @@ DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr) DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32) DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32) DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr) +DEF_HELPER_5(XXBLENDVB, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_5(XXBLENDVH, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_5(XXBLENDVW, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_5(XXBLENDVD, void, vsr, vsr, vsr, vsr, i32) DEF_HELPER_2(efscfsi, i32, env, i32) DEF_HELPER_2(efscfui, i32, env, i32) @@ -696,58 +706,60 @@ DEF_HELPER_3(store_601_batu, void, env, i32, tl) #define dh_alias_fprp ptr #define dh_ctype_fprp ppc_fprp_t * -DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp) -DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp) -DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp) -DEF_HELPER_3(dcmpo, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpu, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstdc, i32, env, fprp, i32) -DEF_HELPER_3(dtstdcq, i32, env, fprp, i32) -DEF_HELPER_3(dtstdg, i32, env, fprp, i32) -DEF_HELPER_3(dtstdgq, i32, env, fprp, i32) -DEF_HELPER_3(dtstex, i32, env, fprp, fprp) -DEF_HELPER_3(dtstexq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsf, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsfi, i32, env, i32, fprp) -DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp) -DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_3(dctdp, void, env, fprp, fprp) -DEF_HELPER_3(dctqpq, void, env, fprp, fprp) -DEF_HELPER_3(drsp, void, env, fprp, fprp) -DEF_HELPER_3(drdpq, void, env, fprp, fprp) -DEF_HELPER_3(dcffix, void, env, fprp, fprp) -DEF_HELPER_3(dcffixq, void, env, fprp, fprp) -DEF_HELPER_3(dctfix, void, env, 
fprp, fprp) -DEF_HELPER_3(dctfixq, void, env, fprp, fprp) -DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32) -DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32) -DEF_HELPER_4(denbcd, void, env, fprp, fprp, i32) -DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32) -DEF_HELPER_3(dxex, void, env, fprp, fprp) -DEF_HELPER_3(dxexq, void, env, fprp, fprp) -DEF_HELPER_4(diex, void, env, fprp, fprp, fprp) -DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dscri, void, env, fprp, fprp, i32) -DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32) -DEF_HELPER_4(dscli, void, env, fprp, fprp, i32) -DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32) +DEF_HELPER_4(DADD, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DADDQ, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DSUB, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DSUBQ, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DMUL, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DMULQ, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DDIV, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DDIVQ, void, env, fprp, fprp, fprp) +DEF_HELPER_3(DCMPO, i32, env, fprp, fprp) +DEF_HELPER_3(DCMPOQ, i32, env, fprp, fprp) +DEF_HELPER_3(DCMPU, i32, env, fprp, fprp) +DEF_HELPER_3(DCMPUQ, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTDC, i32, env, fprp, i32) +DEF_HELPER_3(DTSTDCQ, i32, env, fprp, i32) +DEF_HELPER_3(DTSTDG, i32, env, fprp, i32) +DEF_HELPER_3(DTSTDGQ, i32, env, fprp, i32) +DEF_HELPER_3(DTSTEX, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTEXQ, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTSF, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTSFQ, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTSFI, i32, env, i32, fprp) +DEF_HELPER_3(DTSTSFIQ, i32, env, i32, fprp) +DEF_HELPER_5(DQUAI, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DQUAIQ, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DQUA, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DQUAQ, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DRRND, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DRRNDQ, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DRINTX, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DRINTXQ, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DRINTN, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DRINTNQ, void, env, fprp, fprp, i32, i32) +DEF_HELPER_3(DCTDP, void, env, fprp, fprp) +DEF_HELPER_3(DCTQPQ, void, env, fprp, fprp) +DEF_HELPER_3(DRSP, void, env, fprp, fprp) +DEF_HELPER_3(DRDPQ, void, env, fprp, fprp) +DEF_HELPER_3(DCFFIX, void, env, fprp, fprp) +DEF_HELPER_3(DCFFIXQ, void, env, fprp, fprp) +DEF_HELPER_3(DCFFIXQQ, void, env, fprp, avr) +DEF_HELPER_3(DCTFIX, void, env, fprp, fprp) +DEF_HELPER_3(DCTFIXQ, void, env, fprp, fprp) +DEF_HELPER_3(DCTFIXQQ, void, env, avr, fprp) +DEF_HELPER_4(DDEDPD, void, env, fprp, fprp, i32) +DEF_HELPER_4(DDEDPDQ, void, env, fprp, fprp, i32) +DEF_HELPER_4(DENBCD, void, env, fprp, fprp, i32) +DEF_HELPER_4(DENBCDQ, void, env, fprp, fprp, i32) +DEF_HELPER_3(DXEX, void, env, fprp, fprp) +DEF_HELPER_3(DXEXQ, void, env, fprp, fprp) +DEF_HELPER_4(DIEX, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DIEXQ, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DSCRI, void, env, fprp, fprp, i32) +DEF_HELPER_4(DSCRIQ, void, env, fprp, fprp, i32) +DEF_HELPER_4(DSCLI, void, env, fprp, fprp, i32) +DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32) DEF_HELPER_1(tbegin, void, env) DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env) diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index 9fd8d6b817..e135b8aba4 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -24,25 +24,142 @@ @D_bfs ...... 
bf:3 - l:1 ra:5 imm:s16 &D_bf @D_bfu ...... bf:3 - l:1 ra:5 imm:16 &D_bf +%dq_si 4:s12 !function=times_16 +%dq_rtp 22:4 !function=times_2 +@DQ_rtp ...... ....0 ra:5 ............ .... &D rt=%dq_rtp si=%dq_si + +%dq_rt_tsx 3:1 21:5 +@DQ_TSX ...... ..... ra:5 ............ .... &D si=%dq_si rt=%dq_rt_tsx + +%rt_tsxp 21:1 22:4 !function=times_2 +@DQ_TSXP ...... ..... ra:5 ............ .... &D si=%dq_si rt=%rt_tsxp + %ds_si 2:s14 !function=times_4 @DS ...... rt:5 ra:5 .............. .. &D si=%ds_si +%ds_rtp 22:4 !function=times_2 +@DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si + &DX rt d %dx_d 6:s10 16:5 0:1 @DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d +&VA vrt vra vrb rc +@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA + +&VN vrt vra vrb sh +@VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN + &VX vrt vra vrb @VX ...... vrt:5 vra:5 vrb:5 .......... . &VX +&VX_uim4 vrt uim vrb +@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4 + &X rt ra rb @X ...... rt:5 ra:5 rb:5 .......... . &X +&X_rc rt ra rb rc:bool +@X_rc ...... rt:5 ra:5 rb:5 .......... rc:1 &X_rc + +%x_frtp 22:4 !function=times_2 +%x_frap 17:4 !function=times_2 +%x_frbp 12:4 !function=times_2 +@X_tp_ap_bp_rc ...... ....0 ....0 ....0 .......... rc:1 &X_rc rt=%x_frtp ra=%x_frap rb=%x_frbp + +@X_tp_a_bp_rc ...... ....0 ra:5 ....0 .......... rc:1 &X_rc rt=%x_frtp rb=%x_frbp + +&X_tb_rc rt rb rc:bool +@X_tb_rc ...... rt:5 ..... rb:5 .......... rc:1 &X_tb_rc + +@X_tbp_rc ...... ....0 ..... ....0 .......... rc:1 &X_tb_rc rt=%x_frtp rb=%x_frbp + +@X_tp_b_rc ...... ....0 ..... rb:5 .......... rc:1 &X_tb_rc rt=%x_frtp + +@X_t_bp_rc ...... rt:5 ..... ....0 .......... rc:1 &X_tb_rc rb=%x_frbp + &X_bi rt bi @X_bi ...... rt:5 bi:5 ----- .......... - &X_bi +&X_bf bf ra rb +@X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf + +@X_bf_ap_bp ...... bf:3 .. ....0 ....0 .......... . &X_bf ra=%x_frap rb=%x_frbp + +@X_bf_a_bp ...... bf:3 .. ra:5 ....0 .......... . &X_bf rb=%x_frbp + +&X_bf_uim bf uim rb +@X_bf_uim ...... bf:3 . uim:6 rb:5 .......... . &X_bf_uim + +@X_bf_uim_bp ...... bf:3 . uim:6 ....0 .......... . &X_bf_uim rb=%x_frbp + &X_bfl bf l:bool ra rb @X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl +%x_xt 0:1 21:5 +&X_imm8 xt imm:uint8_t +@X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt + +&X_uim5 xt uim:uint8_t +@X_uim5 ...... ..... ..... uim:5 .......... . &X_uim5 xt=%x_xt + +&X_tb_sp_rc rt rb sp rc:bool +@X_tb_sp_rc ...... rt:5 sp:2 ... rb:5 .......... rc:1 &X_tb_sp_rc + +@X_tbp_sp_rc ...... ....0 sp:2 ... ....0 .......... rc:1 &X_tb_sp_rc rt=%x_frtp rb=%x_frbp + +&X_tb_s_rc rt rb s:bool rc:bool +@X_tb_s_rc ...... rt:5 s:1 .... rb:5 .......... rc:1 &X_tb_s_rc + +@X_tbp_s_rc ...... ....0 s:1 .... ....0 .......... rc:1 &X_tb_s_rc rt=%x_frtp rb=%x_frbp + +%x_rt_tsx 0:1 21:5 +@X_TSX ...... ..... ra:5 rb:5 .......... . &X rt=%x_rt_tsx +@X_TSXP ...... ..... ra:5 rb:5 .......... . &X rt=%rt_tsxp + +&X_frtp_vrb frtp vrb +@X_frtp_vrb ...... ....0 ..... vrb:5 .......... . &X_frtp_vrb frtp=%x_frtp + +&X_vrt_frbp vrt frbp +@X_vrt_frbp ...... vrt:5 ..... ....0 .......... . &X_vrt_frbp frbp=%x_frbp + +&XX2 xt xb uim:uint8_t +%xx2_xt 0:1 21:5 +%xx2_xb 1:1 11:5 +@XX2 ...... ..... ... uim:2 ..... ......... .. &XX2 xt=%xx2_xt xb=%xx2_xb + +&Z22_bf_fra bf fra dm +@Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra + +%z22_frap 17:4 !function=times_2 +@Z22_bf_frap ...... bf:3 .. ....0 dm:6 ......... . &Z22_bf_fra fra=%z22_frap + +&Z22_ta_sh_rc rt ra sh rc:bool +@Z22_ta_sh_rc ...... rt:5 ra:5 sh:6 ......... 
rc:1 &Z22_ta_sh_rc
+
+%z22_frtp 22:4 !function=times_2
+@Z22_tap_sh_rc ...... ....0 ....0 sh:6 ......... rc:1 &Z22_ta_sh_rc rt=%z22_frtp ra=%z22_frap
+
+&Z23_tab frt fra frb rmc rc:bool
+@Z23_tab ...... frt:5 fra:5 frb:5 rmc:2 ........ rc:1 &Z23_tab
+
+%z23_frtp 22:4 !function=times_2
+%z23_frap 17:4 !function=times_2
+%z23_frbp 12:4 !function=times_2
+@Z23_tabp ...... ....0 ....0 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp fra=%z23_frap frb=%z23_frbp
+
+@Z23_tp_a_bp ...... ....0 fra:5 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp frb=%z23_frbp
+
+&Z23_tb frt frb r:bool rmc rc:bool
+@Z23_tb ...... frt:5 .... r:1 frb:5 rmc:2 ........ rc:1 &Z23_tb
+
+@Z23_tbp ...... ....0 .... r:1 ....0 rmc:2 ........ rc:1 &Z23_tb frt=%z23_frtp frb=%z23_frbp
+
+&Z23_te_tb te frt frb rmc rc:bool
+@Z23_te_tb ...... frt:5 te:5 frb:5 rmc:2 ........ rc:1 &Z23_te_tb
+
+@Z23_te_tbp ...... ....0 te:5 ....0 rmc:2 ........ rc:1 &Z23_te_tb frt=%z23_frtp frb=%z23_frbp
+
 ### Fixed-Point Load Instructions
 
 LBZ 100010 ..... ..... ................ @D
@@ -74,6 +191,8 @@ LDU 111010 ..... ..... ..............01 @DS
 LDX 011111 ..... ..... ..... 0000010101 - @X
 LDUX 011111 ..... ..... ..... 0000110101 - @X
 
+LQ 111000 ..... ..... ............ ---- @DQ_rtp
+
 ### Fixed-Point Store Instructions
 
 STB 100110 ..... ..... ................ @D
@@ -96,6 +215,8 @@ STDU 111110 ..... ..... ..............01 @DS
 STDX 011111 ..... ..... ..... 0010010101 - @X
 STDUX 011111 ..... ..... ..... 0010110101 - @X
 
+STQ 111110 ..... ..... ..............10 @DS_rtp
+
 ### Fixed-Point Compare Instructions
 
 CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl
@@ -113,6 +234,34 @@ ADDPCIS 010011 ..... ..... .......... 00010 . @DX
 
 ## Fixed-Point Logical Instructions
 
 CFUGED 011111 ..... ..... ..... 0011011100 - @X
+CNTLZDM 011111 ..... ..... ..... 0000111011 - @X
+CNTTZDM 011111 ..... ..... ..... 1000111011 - @X
+PDEPD 011111 ..... ..... ..... 0010011100 - @X
+PEXTD 011111 ..... ..... ..... 0010111100 - @X
+
+### Floating-Point Load Instructions
+
+LFS 110000 ..... ..... ................ @D
+LFSU 110001 ..... ..... ................ @D
+LFSX 011111 ..... ..... ..... 1000010111 - @X
+LFSUX 011111 ..... ..... ..... 1000110111 - @X
+
+LFD 110010 ..... ..... ................ @D
+LFDU 110011 ..... ..... ................ @D
+LFDX 011111 ..... ..... ..... 1001010111 - @X
+LFDUX 011111 ..... ..... ..... 1001110111 - @X
+
+### Floating-Point Store Instructions
+
+STFS 110100 ..... ..... ................ @D
+STFSU 110101 ..... ..... ................ @D
+STFSX 011111 ..... ..... ..... 1010010111 - @X
+STFSUX 011111 ..... ..... ..... 1010110111 - @X
+
+STFD 110110 ..... ..... ................ @D
+STFDU 110111 ..... ..... ................ @D
+STFDX 011111 ..... ..... ..... 1011010111 - @X
+STFDUX 011111 ..... ..... ..... 1011110111 - @X
 
 ### Move To/From System Register Instructions
 
@@ -121,6 +270,160 @@ SETBCR 011111 ..... ..... ----- 0110100000 - @X_bi
 SETNBC 011111 ..... ..... ----- 0111000000 - @X_bi
 SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi
 
+### Decimal Floating-Point Arithmetic Instructions
+
+DADD 111011 ..... ..... ..... 0000000010 . @X_rc
+DADDQ 111111 ..... ..... ..... 0000000010 . @X_tp_ap_bp_rc
+
+DSUB 111011 ..... ..... ..... 1000000010 . @X_rc
+DSUBQ 111111 ..... ..... ..... 1000000010 . @X_tp_ap_bp_rc
+
+DMUL 111011 ..... ..... ..... 0000100010 . @X_rc
+DMULQ 111111 ..... ..... ..... 0000100010 . @X_tp_ap_bp_rc
+
+DDIV 111011 ..... ..... ..... 1000100010 . @X_rc
+DDIVQ 111111 ..... ..... ..... 1000100010 . @X_tp_ap_bp_rc
+
+### Decimal Floating-Point Compare Instructions
+
+DCMPU 111011 ... -- ..... ..... 1010000010 - @X_bf
+DCMPUQ 111111 ... -- ..... ..... 1010000010 - @X_bf_ap_bp
+
+DCMPO 111011 ... -- ..... ..... 0010000010 - @X_bf
+DCMPOQ 111111 ... -- ..... ..... 0010000010 - @X_bf_ap_bp
+
+### Decimal Floating-Point Test Instructions
+
+DTSTDC 111011 ... -- ..... ...... 011000010 - @Z22_bf_fra
+DTSTDCQ 111111 ... -- ..... ...... 011000010 - @Z22_bf_frap
+
+DTSTDG 111011 ... -- ..... ...... 011100010 - @Z22_bf_fra
+DTSTDGQ 111111 ... -- ..... ...... 011100010 - @Z22_bf_frap
+
+DTSTEX 111011 ... -- ..... ..... 0010100010 - @X_bf
+DTSTEXQ 111111 ... -- ..... ..... 0010100010 - @X_bf_ap_bp
+
+DTSTSF 111011 ... -- ..... ..... 1010100010 - @X_bf
+DTSTSFQ 111111 ... -- ..... ..... 1010100010 - @X_bf_a_bp
+
+DTSTSFI 111011 ... - ...... ..... 1010100011 - @X_bf_uim
+DTSTSFIQ 111111 ... - ...... ..... 1010100011 - @X_bf_uim_bp
+
+### Decimal Floating-Point Quantum Adjustment Instructions
+
+DQUAI 111011 ..... ..... ..... .. 01000011 . @Z23_te_tb
+DQUAIQ 111111 ..... ..... ..... .. 01000011 . @Z23_te_tbp
+
+DQUA 111011 ..... ..... ..... .. 00000011 . @Z23_tab
+DQUAQ 111111 ..... ..... ..... .. 00000011 . @Z23_tabp
+
+DRRND 111011 ..... ..... ..... .. 00100011 . @Z23_tab
+DRRNDQ 111111 ..... ..... ..... .. 00100011 . @Z23_tp_a_bp
+
+DRINTX 111011 ..... ---- . ..... .. 01100011 . @Z23_tb
+DRINTXQ 111111 ..... ---- . ..... .. 01100011 . @Z23_tbp
+
+DRINTN 111011 ..... ---- . ..... .. 11100011 . @Z23_tb
+DRINTNQ 111111 ..... ---- . ..... .. 11100011 . @Z23_tbp
+
+### Decimal Floating-Point Conversion Instructions
+
+DCTDP 111011 ..... ----- ..... 0100000010 . @X_tb_rc
+DCTQPQ 111111 ..... ----- ..... 0100000010 . @X_tp_b_rc
+
+DRSP 111011 ..... ----- ..... 1100000010 . @X_tb_rc
+DRDPQ 111111 ..... ----- ..... 1100000010 . @X_tbp_rc
+
+DCFFIX 111011 ..... ----- ..... 1100100010 . @X_tb_rc
+DCFFIXQ 111111 ..... ----- ..... 1100100010 . @X_tp_b_rc
+DCFFIXQQ 111111 ..... 00000 ..... 1111100010 - @X_frtp_vrb
+
+DCTFIX 111011 ..... ----- ..... 0100100010 . @X_tb_rc
+DCTFIXQ 111111 ..... ----- ..... 0100100010 . @X_t_bp_rc
+DCTFIXQQ 111111 ..... 00001 ..... 1111100010 - @X_vrt_frbp
+
+### Decimal Floating-Point Format Instructions
+
+DDEDPD 111011 ..... .. --- ..... 0101000010 . @X_tb_sp_rc
+DDEDPDQ 111111 ..... .. --- ..... 0101000010 . @X_tbp_sp_rc
+
+DENBCD 111011 ..... . ---- ..... 1101000010 . @X_tb_s_rc
+DENBCDQ 111111 ..... . ---- ..... 1101000010 . @X_tbp_s_rc
+
+DXEX 111011 ..... ----- ..... 0101100010 . @X_tb_rc
+DXEXQ 111111 ..... ----- ..... 0101100010 . @X_t_bp_rc
+
+DIEX 111011 ..... ..... ..... 1101100010 . @X_rc
+DIEXQ 111111 ..... ..... ..... 1101100010 . @X_tp_a_bp_rc
+
+DSCLI 111011 ..... ..... ...... 001000010 . @Z22_ta_sh_rc
+DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc
+
+DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc
+DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc
+
 ## Vector Bit Manipulation Instruction
 
 VCFUGED 000100 ..... ..... ..... 10101001101 @VX
+VCLZDM 000100 ..... ..... ..... 11110000100 @VX
+VCTZDM 000100 ..... ..... ..... 11111000100 @VX
+VPDEPD 000100 ..... ..... ..... 10111001101 @VX
+VPEXTD 000100 ..... ..... ..... 10110001101 @VX
+
+## Vector Permute and Formatting Instruction
+
+VEXTDUBVLX 000100 ..... ..... ..... ..... 011000 @VA
+VEXTDUBVRX 000100 ..... ..... ..... ..... 011001 @VA
+VEXTDUHVLX 000100 ..... ..... ..... ..... 011010 @VA
+VEXTDUHVRX 000100 ..... ..... ..... ..... 011011 @VA
+VEXTDUWVLX 000100 ..... ..... ..... ..... 011100 @VA
+VEXTDUWVRX 000100 ..... ..... ..... ..... 011101 @VA
+VEXTDDVLX 000100 ..... ..... ..... ..... 011110 @VA
+VEXTDDVRX 000100 ..... ..... ..... ..... 011111 @VA
+
+VINSERTB 000100 ..... - .... ..... 01100001101 @VX_uim4
+VINSERTH 000100 ..... - .... ..... 01101001101 @VX_uim4
+VINSERTW 000100 ..... - .... ..... 01110001101 @VX_uim4
+VINSERTD 000100 ..... - .... ..... 01111001101 @VX_uim4
+
+VINSBLX 000100 ..... ..... ..... 01000001111 @VX
+VINSBRX 000100 ..... ..... ..... 01100001111 @VX
+VINSHLX 000100 ..... ..... ..... 01001001111 @VX
+VINSHRX 000100 ..... ..... ..... 01101001111 @VX
+VINSWLX 000100 ..... ..... ..... 01010001111 @VX
+VINSWRX 000100 ..... ..... ..... 01110001111 @VX
+VINSDLX 000100 ..... ..... ..... 01011001111 @VX
+VINSDRX 000100 ..... ..... ..... 01111001111 @VX
+
+VINSW 000100 ..... - .... ..... 00011001111 @VX_uim4
+VINSD 000100 ..... - .... ..... 00111001111 @VX_uim4
+
+VINSBVLX 000100 ..... ..... ..... 00000001111 @VX
+VINSBVRX 000100 ..... ..... ..... 00100001111 @VX
+VINSHVLX 000100 ..... ..... ..... 00001001111 @VX
+VINSHVRX 000100 ..... ..... ..... 00101001111 @VX
+VINSWVLX 000100 ..... ..... ..... 00010001111 @VX
+VINSWVRX 000100 ..... ..... ..... 00110001111 @VX
+
+VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN
+VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
+
+# VSX Load/Store Instructions
+
+LXV 111101 ..... ..... ............ . 001 @DQ_TSX
+STXV 111101 ..... ..... ............ . 101 @DQ_TSX
+LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP
+STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP
+LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX
+STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX
+LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP
+STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP
+
+## VSX splat instruction
+
+XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8
+XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2
+
+## VSX Vector Load Special Value Instruction
+
+LXVKQ 111100 ..... 11111 ..... 0101101000 . @X_uim5
diff --git a/target/ppc/insn64.decode b/target/ppc/insn64.decode
index 72c5944a53..39e610913d 100644
--- a/target/ppc/insn64.decode
+++ b/target/ppc/insn64.decode
@@ -23,6 +23,36 @@
 @PLS_D ...... .. ... r:1 .. .................. \
 ...... rt:5 ra:5 ................ \
 &PLS_D si=%pls_si
+@8LS_D_TSX ...... .. . .. r:1 .. .................. \
+ ..... rt:6 ra:5 ................ \
+ &PLS_D si=%pls_si
+
+%rt_tsxp 21:1 22:4 !function=times_2
+@8LS_D_TSXP ...... .. . .. r:1 .. .................. \
+ ...... ..... ra:5 ................ \
+ &PLS_D si=%pls_si rt=%rt_tsxp
+
+# Format 8RR:D
+%8rr_si 32:s16 0:16
+%8rr_xt 16:1 21:5
+&8RR_D_IX xt ix si
+@8RR_D_IX ...... .. .... .. .. ................ \
+ ...... ..... ... ix:1 . ................ \
+ &8RR_D_IX si=%8rr_si xt=%8rr_xt
+&8RR_D xt si:int32_t
+@8RR_D ...... .. .... .. .. ................ \
+ ...... ..... .... . ................ \
+ &8RR_D si=%8rr_si xt=%8rr_xt
+
+# Format XX4
+&XX4 xt xa xb xc
+%xx4_xt 0:1 21:5
+%xx4_xa 2:1 16:5
+%xx4_xb 1:1 11:5
+%xx4_xc 3:1 6:5
+@XX4 ........ ........ ........ ........ \
+ ...... ..... ..... ..... ..... .. .... \
+ &XX4 xt=%xx4_xt xa=%xx4_xa xb=%xx4_xb xc=%xx4_xc
 
 ### Fixed-Point Load Instructions
 
@@ -38,6 +68,8 @@ PLWA 000001 00 0--.-- .................. \
 101001 ..... ..... ................ @PLS_D
 PLD 000001 00 0--.-- .................. \
 111001 ..... ..... ................ @PLS_D
+PLQ 000001 00 0--.-- .................. \
+ 111000 ..... ..... ................ @PLS_D
 
 ### Fixed-Point Store Instructions
 
@@ -50,12 +82,25 @@ PSTH 000001 10 0--.-- .................. \
 
 PSTD 000001 00 0--.-- .................. \
 111101 ..... ..... ................ @PLS_D
+PSTQ 000001 00 0--.-- .................. \
+ 111100 ..... ..... ................ @PLS_D
 
 ### Fixed-Point Arithmetic Instructions
 
 PADDI 000001 10 0--.-- .................. \
 001110 ..... ..... ................ @PLS_D
 
+### Floating-Point Load and Store Instructions
+
+PLFS 000001 10 0--.-- .................. \
+ 110000 ..... ..... ................ @PLS_D
+PLFD 000001 10 0--.-- .................. \
+ 110010 ..... ..... ................ @PLS_D
+PSTFS 000001 10 0--.-- .................. \
+ 110100 ..... ..... ................ @PLS_D
+PSTFD 000001 10 0--.-- .................. \
+ 110110 ..... ..... ................ @PLS_D
+
 ### Prefixed No-operation Instruction
 
 @PNOP 000001 11 0000-- 000000000000000000 \
@@ -122,3 +167,30 @@ PADDI 000001 10 0--.-- .................. \
 PNOP ................................ \
 -------------------------------- @PNOP
 }
+
+### VSX instructions
+
+PLXV 000001 00 0--.-- .................. \
+ 11001 ...... ..... ................ @8LS_D_TSX
+PSTXV 000001 00 0--.-- .................. \
+ 11011 ...... ..... ................ @8LS_D_TSX
+PLXVP 000001 00 0--.-- .................. \
+ 111010 ..... ..... ................ @8LS_D_TSXP
+PSTXVP 000001 00 0--.-- .................. \
+ 111110 ..... ..... ................ @8LS_D_TSXP
+
+XXSPLTIDP 000001 01 0000 -- -- ................ \
+ 100000 ..... 0010 . ................ @8RR_D
+XXSPLTIW 000001 01 0000 -- -- ................ \
+ 100000 ..... 0011 . ................ @8RR_D
+XXSPLTI32DX 000001 01 0000 -- -- ................ \
+ 100000 ..... 000 .. ................ @8RR_D_IX
+
+XXBLENDVD 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 11 .... @XX4
+XXBLENDVW 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 10 .... @XX4
+XXBLENDVH 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 01 .... @XX4
+XXBLENDVB 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 00 ....
@XX4 diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c index eeb7781a9e..9bc327bcba 100644 --- a/target/ppc/int_helper.c +++ b/target/ppc/int_helper.c @@ -324,7 +324,7 @@ target_ulong helper_popcntb(target_ulong val) } #endif -uint64_t helper_cfuged(uint64_t src, uint64_t mask) +uint64_t helper_CFUGED(uint64_t src, uint64_t mask) { /* * Instead of processing the mask bit-by-bit from the most significant to @@ -386,6 +386,42 @@ uint64_t helper_cfuged(uint64_t src, uint64_t mask) return left | (right >> n); } +uint64_t helper_PDEPD(uint64_t src, uint64_t mask) +{ + int i, o; + uint64_t result = 0; + + if (mask == -1) { + return src; + } + + for (i = 0; mask != 0; i++) { + o = ctz64(mask); + mask &= mask - 1; + result |= ((src >> i) & 1) << o; + } + + return result; +} + +uint64_t helper_PEXTD(uint64_t src, uint64_t mask) +{ + int i, o; + uint64_t result = 0; + + if (mask == -1) { + return src; + } + + for (o = 0; mask != 0; o++) { + i = ctz64(mask); + mask &= mask - 1; + result |= ((src >> i) & 1) << o; + } + + return result; +} + /*****************************************************************************/ /* PowerPC 601 specific instructions (POWER bridge) */ target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2) @@ -1577,25 +1613,73 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) } #if defined(HOST_WORDS_BIGENDIAN) -#define VINSERT(suffix, element) \ - void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ - { \ - memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \ - sizeof(r->element[0])); \ - } +#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[IDX]) #else -#define VINSERT(suffix, element) \ - void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ - { \ - uint32_t d = (16 - index) - sizeof(r->element[0]); \ - memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \ - } +#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[15 - (IDX)] - (SIZE) + 1) #endif -VINSERT(b, u8) -VINSERT(h, u16) -VINSERT(w, u32) -VINSERT(d, u64) -#undef VINSERT + +#define VINSX(SUFFIX, TYPE) \ +void glue(glue(helper_VINS, SUFFIX), LX)(CPUPPCState *env, ppc_avr_t *t, \ + uint64_t val, target_ulong index) \ +{ \ + const int maxidx = ARRAY_SIZE(t->u8) - sizeof(TYPE); \ + target_long idx = index; \ + \ + if (idx < 0 || idx > maxidx) { \ + idx = idx < 0 ? sizeof(TYPE) - idx : idx; \ + qemu_log_mask(LOG_GUEST_ERROR, \ + "Invalid index for Vector Insert Element after 0x" TARGET_FMT_lx \ + ", RA = " TARGET_FMT_ld " > %d\n", env->nip, idx, maxidx); \ + } else { \ + TYPE src = val; \ + memcpy(ELEM_ADDR(t, idx, sizeof(TYPE)), &src, sizeof(TYPE)); \ + } \ +} +VINSX(B, uint8_t) +VINSX(H, uint16_t) +VINSX(W, uint32_t) +VINSX(D, uint64_t) +#undef ELEM_ADDR +#undef VINSX +#if defined(HOST_WORDS_BIGENDIAN) +#define VEXTDVLX(NAME, SIZE) \ +void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + target_ulong index) \ +{ \ + const target_long idx = index; \ + ppc_avr_t tmp[2] = { *a, *b }; \ + memset(t, 0, sizeof(*t)); \ + if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \ + memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2 - SIZE], (void *)tmp + idx, SIZE); \ + } else { \ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \ + TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \ + env->nip, idx < 0 ? 
SIZE - idx : idx, 32 - SIZE); \ + } \ +} +#else +#define VEXTDVLX(NAME, SIZE) \ +void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + target_ulong index) \ +{ \ + const target_long idx = index; \ + ppc_avr_t tmp[2] = { *b, *a }; \ + memset(t, 0, sizeof(*t)); \ + if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \ + memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2], \ + (void *)tmp + sizeof(tmp) - SIZE - idx, SIZE); \ + } else { \ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \ + TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \ + env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \ + } \ +} +#endif +VEXTDVLX(VEXTDUBVLX, 1) +VEXTDVLX(VEXTDUHVLX, 2) +VEXTDVLX(VEXTDUWVLX, 4) +VEXTDVLX(VEXTDDVLX, 8) +#undef VEXTDVLX #if defined(HOST_WORDS_BIGENDIAN) #define VEXTRACT(suffix, element) \ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ @@ -1653,6 +1737,21 @@ void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt, *xt = t; } +#define XXBLEND(name, sz) \ +void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + ppc_avr_t *c, uint32_t desc) \ +{ \ + for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) { \ + t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ? \ + b->glue(u, sz)[i] : a->glue(u, sz)[i]; \ + } \ +} +XXBLEND(B, 8) +XXBLEND(H, 16) +XXBLEND(W, 32) +XXBLEND(D, 64) +#undef XXBLEND + #define VEXT_SIGNED(name, element, cast) \ void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ { \ diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 518337bcb7..9960df6e18 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -3197,6 +3197,20 @@ static inline void gen_align_no_le(DisasContext *ctx) (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE); } +static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ) +{ + TCGv ea = tcg_temp_new(); + if (ra) { + tcg_gen_add_tl(ea, cpu_gpr[ra], displ); + } else { + tcg_gen_mov_tl(ea, displ); + } + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(ea, ea); + } + return ea; +} + /*** Integer load ***/ #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask) #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP)) @@ -3313,69 +3327,6 @@ GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST) GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) - -/* lq */ -static void gen_lq(DisasContext *ctx) -{ - int ra, rd; - TCGv EA, hi, lo; - - /* lq is a legal user mode instruction starting in ISA 2.07 */ - bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - - if (!legal_in_user_mode && ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); - return; - } - - if (!le_is_supported && ctx->le_mode) { - gen_align_no_le(ctx); - return; - } - ra = rA(ctx->opcode); - rd = rD(ctx->opcode); - if (unlikely((rd & 1) || rd == ra)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x0F); - - /* Note that the low part is always in RD+1, even in LE mode. 
*/ - lo = cpu_gpr[rd + 1]; - hi = cpu_gpr[rd]; - - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); - gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); - } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); - gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); - } - tcg_temp_free_i32(oi); - tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); - } else { - /* Restart with exclusive lock. */ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - } - } else if (ctx->le_mode) { - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ); - } else { - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ); - } - tcg_temp_free(EA); -} #endif /*** Integer store ***/ @@ -3421,90 +3372,6 @@ GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) - -static void gen_std(DisasContext *ctx) -{ - int rs; - TCGv EA; - - rs = rS(ctx->opcode); - if ((ctx->opcode & 0x3) == 0x2) { /* stq */ - bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - TCGv hi, lo; - - if (!(ctx->insns_flags & PPC_64BX)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - } - - if (!legal_in_user_mode && ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); - return; - } - - if (!le_is_supported && ctx->le_mode) { - gen_align_no_le(ctx); - return; - } - - if (unlikely(rs & 1)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x03); - - /* Note that the low part is always in RS+1, even in LE mode. */ - lo = cpu_gpr[rs + 1]; - hi = cpu_gpr[rs]; - - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128, - ctx->mem_idx)); - gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi); - } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128, - ctx->mem_idx)); - gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi); - } - tcg_temp_free_i32(oi); - } else { - /* Restart with exclusive lock. 
*/ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - } - } else if (ctx->le_mode) { - tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ); - } else { - tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ); - } - tcg_temp_free(EA); - } else { - /* std / stdu */ - if (Rc(ctx->opcode)) { - if (unlikely(rA(ctx->opcode) == 0)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - } - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x03); - gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA); - if (Rc(ctx->opcode)) { - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); - } - tcg_temp_free(EA); - } -} #endif /*** Integer load and store with byte reverse ***/ @@ -7438,11 +7305,21 @@ static inline void set_avr64(int regno, TCGv_i64 src, bool high) /* * Helpers for decodetree used by !function for decoding arguments. */ +static int times_2(DisasContext *ctx, int x) +{ + return x * 2; +} + static int times_4(DisasContext *ctx, int x) { return x * 4; } +static int times_16(DisasContext *ctx, int x) +{ + return x * 16; +} + /* * Helpers for trans_* functions to check for specific insns flags. * Use token pasting to ensure that we use the proper flag with the @@ -7469,6 +7346,30 @@ static int times_4(DisasContext *ctx, int x) # define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B) #endif +#define REQUIRE_VECTOR(CTX) \ + do { \ + if (unlikely(!(CTX)->altivec_enabled)) { \ + gen_exception((CTX), POWERPC_EXCP_VPU); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_VSX(CTX) \ + do { \ + if (unlikely(!(CTX)->vsx_enabled)) { \ + gen_exception((CTX), POWERPC_EXCP_VSXU); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_FPU(ctx) \ + do { \ + if (unlikely(!(ctx)->fpu_enabled)) { \ + gen_exception((ctx), POWERPC_EXCP_FPU); \ + return true; \ + } \ + } while (0) + /* * Helpers for implementing sets of trans_* functions. * Defer the implementation of NAME to FUNC, with optional extra arguments. @@ -7488,6 +7389,25 @@ static int times_4(DisasContext *ctx, int x) #include "decode-insn64.c.inc" #include "power8-pmu-regs.c.inc" +/* + * Incorporate CIA into the constant when R=1. + * Validate that when R=1, RA=0. 
+ */ +static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) +{ + d->rt = a->rt; + d->ra = a->ra; + d->si = a->si; + if (a->r) { + if (unlikely(a->ra != 0)) { + gen_invalid(ctx); + return false; + } + d->si += ctx->cia; + } + return true; +} + #include "translate/fixedpoint-impl.c.inc" #include "translate/fp-impl.c.inc" @@ -7495,7 +7415,6 @@ static int times_4(DisasContext *ctx, int x) #include "translate/vmx-impl.c.inc" #include "translate/vsx-impl.c.inc" -#include "translate/vector-impl.c.inc" #include "translate/dfp-impl.c.inc" @@ -7527,20 +7446,7 @@ static void gen_dform39(DisasContext *ctx) /* handles stfdp, lxv, stxsd, stxssp lxvx */ static void gen_dform3D(DisasContext *ctx) { - if ((ctx->opcode & 3) == 1) { /* DQ-FORM */ - switch (ctx->opcode & 0x7) { - case 1: /* lxv */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_lxv(ctx); - } - break; - case 5: /* stxv */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_stxv(ctx); - } - break; - } - } else { /* DS-FORM */ + if ((ctx->opcode & 3) != 1) { /* DS-FORM */ switch (ctx->opcode & 0x3) { case 0: /* stfdp */ if (ctx->insns_flags2 & PPC2_ISA205) { @@ -7663,13 +7569,9 @@ GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000, GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000, PPC_NONE, PPC2_ISA300), #endif -#if defined(TARGET_PPC64) -GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX), -GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B), -#endif /* handles lfdp, lxsd, lxssp */ GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), -/* handles stfdp, lxv, stxsd, stxssp, stxv */ +/* handles stfdp, stxsd, stxssp */ GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), @@ -8171,8 +8073,6 @@ GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \ #include "translate/vsx-ops.c.inc" -#include "translate/dfp-ops.c.inc" - #include "translate/spe-ops.c.inc" }; diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc index 6c556dc2e1..f9f1d58d44 100644 --- a/target/ppc/translate/dfp-impl.c.inc +++ b/target/ppc/translate/dfp-impl.c.inc @@ -7,226 +7,223 @@ static inline TCGv_ptr gen_fprp_ptr(int reg) return r; } -#define GEN_DFP_T_A_B_Rc(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rd, ra, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rd = gen_fprp_ptr(rD(ctx->opcode)); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_env, rd, ra, rb); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_T_A_B_Rc(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + ra = gen_fprp_ptr(a->ra); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_env, rt, ra, rb); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_A_B(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr ra, rb; \ - if 
(unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_BF_A_B(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + ra = gen_fprp_ptr(a->ra); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, ra, rb); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_I_B(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_i32 uim; \ - TCGv_ptr rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - uim = tcg_const_i32(UIMM5(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, uim, rb); \ - tcg_temp_free_i32(uim); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_BF_I_B(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, tcg_constant_i32(a->uim), rb);\ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_A_DCM(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr ra; \ - TCGv_i32 dcm; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - dcm = tcg_const_i32(DCM(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, ra, dcm); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_i32(dcm); \ +#define TRANS_DFP_BF_A_DCM(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr ra; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + ra = gen_fprp_ptr(a->fra); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, ra, tcg_constant_i32(a->dm)); \ + tcg_temp_free_ptr(ra); \ + return true; \ } -#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rb; \ - TCGv_i32 u32_1, u32_2; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \ - u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_i32(u32_1); \ - tcg_temp_free_i32(u32_2); \ +#define TRANS_DFP_T_B_U32_U32_Rc(NAME, U32F1, U32F2) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->frt); \ + rb = gen_fprp_ptr(a->frb); \ + gen_helper_##NAME(cpu_env, rt, rb, \ + tcg_constant_i32(a->U32F1), \ + tcg_constant_i32(a->U32F2)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rb); \ 
+ return true; \ } -#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, ra, rb; \ - TCGv_i32 i32; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - i32 = tcg_const_i32(i32fld(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, ra, rb, i32); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_i32(i32); \ - } - -#define GEN_DFP_T_B_Rc(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rb); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - } - -#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rs; \ - TCGv_i32 i32; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rs = gen_fprp_ptr(fprfld(ctx->opcode)); \ - i32 = tcg_const_i32(i32fld(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rs, i32); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rs); \ - tcg_temp_free_i32(i32); \ +#define TRANS_DFP_T_A_B_I32_Rc(NAME, I32FLD) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->frt); \ + ra = gen_fprp_ptr(a->fra); \ + rb = gen_fprp_ptr(a->frb); \ + gen_helper_##NAME(cpu_env, rt, ra, rb, \ + tcg_constant_i32(a->I32FLD)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -GEN_DFP_T_A_B_Rc(dadd) -GEN_DFP_T_A_B_Rc(daddq) -GEN_DFP_T_A_B_Rc(dsub) -GEN_DFP_T_A_B_Rc(dsubq) -GEN_DFP_T_A_B_Rc(dmul) -GEN_DFP_T_A_B_Rc(dmulq) -GEN_DFP_T_A_B_Rc(ddiv) -GEN_DFP_T_A_B_Rc(ddivq) -GEN_DFP_BF_A_B(dcmpu) -GEN_DFP_BF_A_B(dcmpuq) -GEN_DFP_BF_A_B(dcmpo) -GEN_DFP_BF_A_B(dcmpoq) -GEN_DFP_BF_A_DCM(dtstdc) -GEN_DFP_BF_A_DCM(dtstdcq) -GEN_DFP_BF_A_DCM(dtstdg) -GEN_DFP_BF_A_DCM(dtstdgq) -GEN_DFP_BF_A_B(dtstex) -GEN_DFP_BF_A_B(dtstexq) -GEN_DFP_BF_A_B(dtstsf) -GEN_DFP_BF_A_B(dtstsfq) -GEN_DFP_BF_I_B(dtstsfi) -GEN_DFP_BF_I_B(dtstsfiq) -GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC) -GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC) -GEN_DFP_T_A_B_I32_Rc(dqua, RMC) -GEN_DFP_T_A_B_I32_Rc(dquaq, RMC) -GEN_DFP_T_A_B_I32_Rc(drrnd, RMC) -GEN_DFP_T_A_B_I32_Rc(drrndq, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC) -GEN_DFP_T_B_Rc(dctdp) -GEN_DFP_T_B_Rc(dctqpq) -GEN_DFP_T_B_Rc(drsp) -GEN_DFP_T_B_Rc(drdpq) -GEN_DFP_T_B_Rc(dcffix) -GEN_DFP_T_B_Rc(dcffixq) -GEN_DFP_T_B_Rc(dctfix) -GEN_DFP_T_B_Rc(dctfixq) -GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP) 
-GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP) -GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP) -GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP) -GEN_DFP_T_B_Rc(dxex) -GEN_DFP_T_B_Rc(dxexq) -GEN_DFP_T_A_B_Rc(diex) -GEN_DFP_T_A_B_Rc(diexq) -GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM) - -#undef GEN_DFP_T_A_B_Rc -#undef GEN_DFP_BF_A_B -#undef GEN_DFP_BF_A_DCM -#undef GEN_DFP_T_B_U32_U32_Rc -#undef GEN_DFP_T_A_B_I32_Rc -#undef GEN_DFP_T_B_Rc -#undef GEN_DFP_T_FPR_I32_Rc +#define TRANS_DFP_T_B_Rc(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_env, rt, rb); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rb); \ + return true; \ +} + +#define TRANS_DFP_T_FPR_I32_Rc(NAME, FPRFLD, I32FLD) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rx; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + rx = gen_fprp_ptr(a->FPRFLD); \ + gen_helper_##NAME(cpu_env, rt, rx, \ + tcg_constant_i32(a->I32FLD)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rx); \ + return true; \ +} + +TRANS_DFP_T_A_B_Rc(DADD) +TRANS_DFP_T_A_B_Rc(DADDQ) +TRANS_DFP_T_A_B_Rc(DSUB) +TRANS_DFP_T_A_B_Rc(DSUBQ) +TRANS_DFP_T_A_B_Rc(DMUL) +TRANS_DFP_T_A_B_Rc(DMULQ) +TRANS_DFP_T_A_B_Rc(DDIV) +TRANS_DFP_T_A_B_Rc(DDIVQ) +TRANS_DFP_BF_A_B(DCMPU) +TRANS_DFP_BF_A_B(DCMPUQ) +TRANS_DFP_BF_A_B(DCMPO) +TRANS_DFP_BF_A_B(DCMPOQ) +TRANS_DFP_BF_A_DCM(DTSTDC) +TRANS_DFP_BF_A_DCM(DTSTDCQ) +TRANS_DFP_BF_A_DCM(DTSTDG) +TRANS_DFP_BF_A_DCM(DTSTDGQ) +TRANS_DFP_BF_A_B(DTSTEX) +TRANS_DFP_BF_A_B(DTSTEXQ) +TRANS_DFP_BF_A_B(DTSTSF) +TRANS_DFP_BF_A_B(DTSTSFQ) +TRANS_DFP_BF_I_B(DTSTSFI) +TRANS_DFP_BF_I_B(DTSTSFIQ) +TRANS_DFP_T_B_U32_U32_Rc(DQUAI, te, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DQUAIQ, te, rmc) +TRANS_DFP_T_A_B_I32_Rc(DQUA, rmc) +TRANS_DFP_T_A_B_I32_Rc(DQUAQ, rmc) +TRANS_DFP_T_A_B_I32_Rc(DRRND, rmc) +TRANS_DFP_T_A_B_I32_Rc(DRRNDQ, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTX, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTXQ, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTN, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTNQ, r, rmc) +TRANS_DFP_T_B_Rc(DCTDP) +TRANS_DFP_T_B_Rc(DCTQPQ) +TRANS_DFP_T_B_Rc(DRSP) +TRANS_DFP_T_B_Rc(DRDPQ) +TRANS_DFP_T_B_Rc(DCFFIX) +TRANS_DFP_T_B_Rc(DCFFIXQ) +TRANS_DFP_T_B_Rc(DCTFIX) +TRANS_DFP_T_B_Rc(DCTFIXQ) +TRANS_DFP_T_FPR_I32_Rc(DDEDPD, rb, sp) +TRANS_DFP_T_FPR_I32_Rc(DDEDPDQ, rb, sp) +TRANS_DFP_T_FPR_I32_Rc(DENBCD, rb, s) +TRANS_DFP_T_FPR_I32_Rc(DENBCDQ, rb, s) +TRANS_DFP_T_B_Rc(DXEX) +TRANS_DFP_T_B_Rc(DXEXQ) +TRANS_DFP_T_A_B_Rc(DIEX) +TRANS_DFP_T_A_B_Rc(DIEXQ) +TRANS_DFP_T_FPR_I32_Rc(DSCLI, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCLIQ, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCRI, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCRIQ, ra, sh) + +static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a) +{ + TCGv_ptr rt, rb; + + REQUIRE_INSNS_FLAGS2(ctx, DFP); + REQUIRE_FPU(ctx); + REQUIRE_VECTOR(ctx); + + rt = gen_fprp_ptr(a->frtp); + rb = gen_avr_ptr(a->vrb); + gen_helper_DCFFIXQQ(cpu_env, rt, rb); + tcg_temp_free_ptr(rt); + tcg_temp_free_ptr(rb); + + return true; +} + +static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a) +{ + TCGv_ptr rt, rb; + + REQUIRE_INSNS_FLAGS2(ctx, DFP); + REQUIRE_FPU(ctx); + 
REQUIRE_VECTOR(ctx); + + rt = gen_avr_ptr(a->vrt); + rb = gen_fprp_ptr(a->frbp); + gen_helper_DCTFIXQQ(cpu_env, rt, rb); + tcg_temp_free_ptr(rt); + tcg_temp_free_ptr(rb); + + return true; +} diff --git a/target/ppc/translate/dfp-ops.c.inc b/target/ppc/translate/dfp-ops.c.inc deleted file mode 100644 index 6ef38e5712..0000000000 --- a/target/ppc/translate/dfp-ops.c.inc +++ /dev/null @@ -1,165 +0,0 @@ -#define _GEN_DFP_LONG(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_LONG_300(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300) - -#define _GEN_DFP_LONGx2(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_LONGx4(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUAD(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300) - -#define _GEN_DFP_QUADx2(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUADx4(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) - -#define GEN_DFP_T_A_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00210800) - -#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00200800) - -#define GEN_DFP_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x001F0000) - -#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x003F0800) - -#define GEN_DFP_Tp_B_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x003F0000) - -#define GEN_DFP_T_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x001F0800) - -#define GEN_DFP_BF_A_B(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00000001) - -#define GEN_DFP_BF_A_B_300(name, op1, op2) \ -_GEN_DFP_LONG_300(name, op1, op2, 0x00400001) - -#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00610801) - -#define GEN_DFP_BF_A_Bp(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00600801) - -#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \ -_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001) - -#define GEN_DFP_BF_A_DCM(name, op1, op2) \ -_GEN_DFP_LONGx2(name, op1, op2, 0x00600001) - -#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \ -_GEN_DFP_QUADx2(name, op1, op2, 0x00610001) - -#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x02010800) - -#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x02000800) - -#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \ 
-_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) - -#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x00200800) - -#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000) - -#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800) - -#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00070000) - -#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00270800) - -#define GEN_DFP_S_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x000F0000) - -#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x002F0800) - -#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \ -_GEN_DFP_LONGx2(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \ -_GEN_DFP_QUADx2(name, op1, op2, 0x00210000) - -GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00), -GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00), -GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10), -GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10), -GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01), -GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01), -GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11), -GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11), -GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14), -GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14), -GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04), -GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04), -GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06), -GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06), -GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07), -GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07), -GEN_DFP_BF_A_B(dtstex, 0x02, 0x05), -GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05), -GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15), -GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15), -GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15), -GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15), -GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02), -GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02), -GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00), -GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00), -GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01), -GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01), -GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03), -GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03), -GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07), -GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07), -GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08), -GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08), -GEN_DFP_T_B_Rc(drsp, 0x02, 0x18), -GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18), -GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19), -GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19), -GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09), -GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09), -GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a), -GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a), -GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a), -GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a), -GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b), -GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b), -GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b), -GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b), -GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02), -GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02), -GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03), -GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03), diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index 2e2518ee15..7fecff4579 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -18,25 +18,6 @@ */ /* - * Incorporate CIA into the constant when R=1. - * Validate that when R=1, RA=0. 
- */ -static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) -{ - d->rt = a->rt; - d->ra = a->ra; - d->si = a->si; - if (a->r) { - if (unlikely(a->ra != 0)) { - gen_invalid(ctx); - return false; - } - d->si += ctx->cia; - } - return true; -} - -/* * Fixed-Point Load/Store Instructions */ @@ -51,15 +32,7 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update, } gen_set_access_type(ctx, ACCESS_INT); - ea = tcg_temp_new(); - if (ra) { - tcg_gen_add_tl(ea, cpu_gpr[ra], displ); - } else { - tcg_gen_mov_tl(ea, displ); - } - if (NARROW_MODE(ctx)) { - tcg_gen_ext32u_tl(ea, ea); - } + ea = do_ea_calc(ctx, ra, displ); mop ^= ctx->default_tcg_memop_mask; if (store) { tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop); @@ -96,6 +69,107 @@ static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update, return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop); } +static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed) +{ +#if defined(TARGET_PPC64) + TCGv ea; + TCGv_i64 low_addr_gpr, high_addr_gpr; + MemOp mop; + + REQUIRE_INSNS_FLAGS(ctx, 64BX); + + if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) { + if (ctx->pr) { + /* lq and stq were privileged prior to V. 2.07 */ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; + } + + if (ctx->le_mode) { + gen_align_no_le(ctx); + return true; + } + } + + if (!store && unlikely(a->ra == a->rt)) { + gen_invalid(ctx); + return true; + } + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si)); + + if (prefixed || !ctx->le_mode) { + low_addr_gpr = cpu_gpr[a->rt]; + high_addr_gpr = cpu_gpr[a->rt + 1]; + } else { + low_addr_gpr = cpu_gpr[a->rt + 1]; + high_addr_gpr = cpu_gpr[a->rt]; + } + + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + if (HAVE_ATOMIC128) { + mop = DEF_MEMOP(MO_128); + TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx)); + if (store) { + if (ctx->le_mode) { + gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr, + high_addr_gpr, oi); + } else { + gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr, + low_addr_gpr, oi); + + } + } else { + if (ctx->le_mode) { + gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi); + tcg_gen_ld_i64(high_addr_gpr, cpu_env, + offsetof(CPUPPCState, retxh)); + } else { + gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi); + tcg_gen_ld_i64(low_addr_gpr, cpu_env, + offsetof(CPUPPCState, retxh)); + } + } + } else { + /* Restart with exclusive lock. 
*/ + gen_helper_exit_atomic(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + } + } else { + mop = DEF_MEMOP(MO_Q); + if (store) { + tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop); + } + + gen_addr_add(ctx, ea, ea, 8); + + if (store) { + tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop); + } + } + tcg_temp_free(ea); +#else + qemu_build_not_reached(); +#endif + + return true; +} + +static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store) +{ + arg_D d; + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_ldst_quad(ctx, &d, store, true); +} + /* Load Byte and Zero */ TRANS(LBZ, do_ldst_D, false, false, MO_UB) TRANS(LBZX, do_ldst_X, false, false, MO_UB) @@ -137,6 +211,10 @@ TRANS64(LDU, do_ldst_D, true, false, MO_Q) TRANS64(LDUX, do_ldst_X, true, false, MO_Q) TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q) +/* Load Quadword */ +TRANS64(LQ, do_ldst_quad, false, false); +TRANS64(PLQ, do_ldst_quad_PLS_D, false); + /* Store Byte */ TRANS(STB, do_ldst_D, false, true, MO_UB) TRANS(STBX, do_ldst_X, false, true, MO_UB) @@ -165,6 +243,10 @@ TRANS64(STDU, do_ldst_D, true, true, MO_Q) TRANS64(STDUX, do_ldst_X, true, true, MO_Q) TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q) +/* Store Quadword */ +TRANS64(STQ, do_ldst_quad, true, false); +TRANS64(PSTQ, do_ldst_quad_PLS_D, true); + /* * Fixed-Point Compare Instructions */ @@ -325,7 +407,86 @@ static bool trans_CFUGED(DisasContext *ctx, arg_X *a) REQUIRE_64BIT(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA310); #if defined(TARGET_PPC64) - gen_helper_cfuged(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); + gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail) +{ + TCGv_i64 t0, t1; + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + tcg_gen_and_i64(t0, src, mask); + if (trail) { + tcg_gen_ctzi_i64(t0, t0, -1); + } else { + tcg_gen_clzi_i64(t0, t0, -1); + } + + tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1); + tcg_gen_andi_i64(t0, t0, 63); + tcg_gen_xori_i64(t0, t0, 63); + if (trail) { + tcg_gen_shl_i64(t0, mask, t0); + tcg_gen_shl_i64(t0, t0, t1); + } else { + tcg_gen_shr_i64(t0, mask, t0); + tcg_gen_shr_i64(t0, t0, t1); + } + + tcg_gen_ctpop_i64(dst, t0); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_PDEPD(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_PEXTD(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], 
cpu_gpr[a->rb]); #else qemu_build_not_reached(); #endif diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index 9f7868ee28..d1dbb1b96b 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -854,99 +854,6 @@ static void gen_mtfsfi(DisasContext *ctx) gen_helper_float_check_status(cpu_env); } -/*** Floating-point load ***/ -#define GEN_LDF(name, ldop, opc, type) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDUF(name, ldop, opc, type) \ -static void glue(gen_, name##u)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDUXF(name, ldop, opc, type) \ -static void glue(gen_, name##ux)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - gen_addr_reg_index(ctx, EA); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDXF(name, ldop, opc2, opc3, type) \ -static void glue(gen_, name##x)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_reg_index(ctx, EA); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDFS(name, ldop, op, type) \ -GEN_LDF(name, ldop, op | 0x20, type); \ -GEN_LDUF(name, ldop, op | 0x21, type); \ -GEN_LDUXF(name, ldop, op | 0x01, type); \ -GEN_LDXF(name, ldop, 0x17, op | 0x00, type) - static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -955,11 +862,6 @@ static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) tcg_temp_free_i32(tmp); } - /* lfd lfdu lfdux lfdx */ -GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT); - /* lfs lfsu lfsux lfsx */ -GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT); - /* lfdepx (external PID lfdx) */ static void gen_lfdepx(DisasContext *ctx) { @@ -1089,73 +991,6 @@ static void gen_lfiwzx(DisasContext *ctx) tcg_temp_free(EA); tcg_temp_free_i64(t0); } -/*** Floating-point store ***/ -#define GEN_STF(name, stop, opc, type) \ -static void glue(gen_, 
name)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_STUF(name, stop, opc, type) \ -static void glue(gen_, name##u)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_STUXF(name, stop, opc, type) \ -static void glue(gen_, name##ux)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_reg_index(ctx, EA); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} #define GEN_STXF(name, stop, opc2, opc3, type) \ static void glue(gen_, name##x)(DisasContext *ctx) \ @@ -1176,12 +1011,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ tcg_temp_free_i64(t0); \ } -#define GEN_STFS(name, stop, op, type) \ -GEN_STF(name, stop, op | 0x20, type); \ -GEN_STUF(name, stop, op | 0x21, type); \ -GEN_STUXF(name, stop, op | 0x01, type); \ -GEN_STXF(name, stop, 0x17, op | 0x00, type) - static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -1190,11 +1019,6 @@ static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) tcg_temp_free_i32(tmp); } -/* stfd stfdu stfdux stfdx */ -GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT); -/* stfs stfsu stfsux stfsx */ -GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT); - /* stfdepx (external PID lfdx) */ static void gen_stfdepx(DisasContext *ctx) { @@ -1473,6 +1297,91 @@ static void gen_stfqx(DisasContext *ctx) tcg_temp_free_i64(t1); } +/* Floating-point Load/Store Instructions */ +static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ, + bool update, bool store, bool single) +{ + TCGv ea; + TCGv_i64 t0; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); + if (update && ra == 0) { + gen_invalid(ctx); + return true; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + t0 = tcg_temp_new_i64(); + ea = do_ea_calc(ctx, ra, displ); + if (store) { + get_fpr(t0, rt); + if (single) { + gen_qemu_st32fs(ctx, t0, ea); + } else { + gen_qemu_st64_i64(ctx, t0, ea); + } + } else { + if (single) { + gen_qemu_ld32fs(ctx, t0, ea); + } else { + gen_qemu_ld64_i64(ctx, t0, ea); + } + set_fpr(rt, t0); + } + if (update) { + tcg_gen_mov_tl(cpu_gpr[rt], ea); + } + tcg_temp_free_i64(t0); + tcg_temp_free(ea); + return true; +} + +static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool 
store, + bool single) +{ + return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store, + single); +} + +static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update, + bool store, bool single) +{ + arg_D d; + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + return do_lsfp_D(ctx, &d, update, store, single); +} + +static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update, + bool store, bool single) +{ + return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single); +} + +TRANS(LFS, do_lsfp_D, false, false, true) +TRANS(LFSU, do_lsfp_D, true, false, true) +TRANS(LFSX, do_lsfp_X, false, false, true) +TRANS(LFSUX, do_lsfp_X, true, false, true) +TRANS(PLFS, do_lsfp_PLS_D, false, false, true) + +TRANS(LFD, do_lsfp_D, false, false, false) +TRANS(LFDU, do_lsfp_D, true, false, false) +TRANS(LFDX, do_lsfp_X, false, false, false) +TRANS(LFDUX, do_lsfp_X, true, false, false) +TRANS(PLFD, do_lsfp_PLS_D, false, false, false) + +TRANS(STFS, do_lsfp_D, false, true, true) +TRANS(STFSU, do_lsfp_D, true, true, true) +TRANS(STFSX, do_lsfp_X, false, true, true) +TRANS(STFSUX, do_lsfp_X, true, true, true) +TRANS(PSTFS, do_lsfp_PLS_D, false, true, true) + +TRANS(STFD, do_lsfp_D, false, true, false) +TRANS(STFDU, do_lsfp_D, true, true, false) +TRANS(STFDX, do_lsfp_X, false, true, false) +TRANS(STFDUX, do_lsfp_X, true, true, false) +TRANS(PSTFD, do_lsfp_PLS_D, false, true, false) + #undef _GEN_FLOAT_ACB #undef GEN_FLOAT_ACB #undef _GEN_FLOAT_AB diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc index 88fab65628..4260635a12 100644 --- a/target/ppc/translate/fp-ops.c.inc +++ b/target/ppc/translate/fp-ops.c.inc @@ -50,43 +50,14 @@ GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT), -#define GEN_LDF(name, ldop, opc, type) \ -GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_LDUF(name, ldop, opc, type) \ -GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_LDUXF(name, ldop, opc, type) \ -GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), -#define GEN_LDXF(name, ldop, opc2, opc3, type) \ -GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), -#define GEN_LDFS(name, ldop, op, type) \ -GEN_LDF(name, ldop, op | 0x20, type) \ -GEN_LDUF(name, ldop, op | 0x21, type) \ -GEN_LDUXF(name, ldop, op | 0x01, type) \ -GEN_LDXF(name, ldop, 0x17, op | 0x00, type) - -GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT) -GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT) GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205), -#define GEN_STF(name, stop, opc, type) \ -GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_STUF(name, stop, opc, type) \ -GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_STUXF(name, stop, opc, type) \ -GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), #define GEN_STXF(name, stop, opc2, opc3, type) \ GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), -#define GEN_STFS(name, stop, op, type) \ -GEN_STF(name, stop, op | 0x20, type) \ -GEN_STUF(name, stop, op | 0x21, type) \ -GEN_STUXF(name, stop, op | 0x01, type) \ -GEN_STXF(name, stop, 0x17, op | 0x00, type) -GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT) -GEN_STFS(stfs, st32fs, 0x14, 
PPC_FLOAT) GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX) GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205), diff --git a/target/ppc/translate/vector-impl.c.inc b/target/ppc/translate/vector-impl.c.inc deleted file mode 100644 index 117ce9b137..0000000000 --- a/target/ppc/translate/vector-impl.c.inc +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Power ISA decode for Vector Facility instructions - * - * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br) - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ - -#define REQUIRE_ALTIVEC(CTX) \ - do { \ - if (unlikely(!(CTX)->altivec_enabled)) { \ - gen_exception((CTX), POWERPC_EXCP_VPU); \ - return true; \ - } \ - } while (0) - -static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a) -{ - TCGv_i64 tgt, src, mask; - - REQUIRE_INSNS_FLAGS2(ctx, ISA310); - REQUIRE_ALTIVEC(ctx); - - tgt = tcg_temp_new_i64(); - src = tcg_temp_new_i64(); - mask = tcg_temp_new_i64(); - - /* centrifuge lower double word */ - get_cpu_vsrl(src, a->vra + 32); - get_cpu_vsrl(mask, a->vrb + 32); - gen_helper_cfuged(tgt, src, mask); - set_cpu_vsrl(a->vrt + 32, tgt); - - /* centrifuge higher double word */ - get_cpu_vsrh(src, a->vra + 32); - get_cpu_vsrh(mask, a->vrb + 32); - gen_helper_cfuged(tgt, src, mask); - set_cpu_vsrh(a->vrt + 32, tgt); - - tcg_temp_free_i64(tgt); - tcg_temp_free_i64(src); - tcg_temp_free_i64(mask); - - return true; -} diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 92b9527aff..8eb8d3a067 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -1217,10 +1217,6 @@ GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15); GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14); GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12); GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8); -GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15); -GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14); -GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12); -GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8); GEN_VXFORM_UIMM_ENV(vcfux, 5, 12); GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13); GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14); @@ -1231,12 +1227,184 @@ GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE, vextractuh, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE, vextractuw, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE, - vinsertb, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE, - vinserth, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE, - vinsertw, PPC_NONE, PPC2_ISA300); + +static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv)) +{ + TCGv_ptr vrt, vra, vrb; + TCGv rc; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + vrt = 
gen_avr_ptr(a->vrt); + vra = gen_avr_ptr(a->vra); + vrb = gen_avr_ptr(a->vrb); + rc = tcg_temp_new(); + + tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F); + if (right) { + tcg_gen_subfi_tl(rc, 32 - size, rc); + } + gen_helper(cpu_env, vrt, vra, vrb, rc); + + tcg_temp_free_ptr(vrt); + tcg_temp_free_ptr(vra); + tcg_temp_free_ptr(vrb); + tcg_temp_free(rc); + return true; +} + +TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX) +TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX) +TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX) +TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX) + +TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX) +TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX) +TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX) +TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX) + +static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, + TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + TCGv_ptr t; + TCGv idx; + + t = gen_avr_ptr(vrt); + idx = tcg_temp_new(); + + tcg_gen_andi_tl(idx, ra, 0xF); + if (right) { + tcg_gen_subfi_tl(idx, 16 - size, idx); + } + + gen_helper(cpu_env, t, rb, idx); + + tcg_temp_free_ptr(t); + tcg_temp_free(idx); + + return true; +} + +static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, + int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + val = tcg_temp_new_i64(); + get_avr64(val, vrb, true); + ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + val = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); + + ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb, + gen_helper); +} + +static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + if (a->uim > (16 - size)) { + /* + * PowerISA v3.1 says that the resulting value is undefined in this + * case, so just log a guest error and leave VRT unchanged. The + * real hardware would do a partial insert, e.g. if VRT is zeroed and + * RB is 0x12345678, executing "vinsw VRT,RB,14" results in + * VRT = 0x0000...00001234, but we don't bother to reproduce this + * behavior as software shouldn't rely on it. 
+ */ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at" + " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim, + 16 - size); + return true; + } + + val = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); + + ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val, + gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VECTOR(ctx); + + if (a->uim > (16 - size)) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at" + " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim, + 16 - size); + return true; + } + + return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb, + gen_helper); +} + +TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX) +TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX) +TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX) +TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX) + +TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX) +TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX) +TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX) +TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX) + +TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX) +TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX) + +TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX) +TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX) +TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX) + +TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX) +TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX) +TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX) + +TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX) +TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX) +TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX) +TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX) static void gen_vsldoi(DisasContext *ctx) { @@ -1257,6 +1425,72 @@ static void gen_vsldoi(DisasContext *ctx) tcg_temp_free_i32(sh); } +static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a) +{ + TCGv_i64 t0, t1, t2; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + get_avr64(t0, a->vra, true); + get_avr64(t1, a->vra, false); + + if (a->sh != 0) { + t2 = tcg_temp_new_i64(); + + get_avr64(t2, a->vrb, true); + + tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh); + tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh); + + tcg_temp_free_i64(t2); + } + + set_avr64(a->vrt, t0, true); + set_avr64(a->vrt, t1, false); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + +static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a) +{ + TCGv_i64 t2, t1, t0; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + get_avr64(t0, a->vrb, false); + get_avr64(t1, a->vrb, true); + + if (a->sh != 0) { + t2 = tcg_temp_new_i64(); + + get_avr64(t2, a->vra, false); + + tcg_gen_extract2_i64(t0, t0, t1, a->sh); + tcg_gen_extract2_i64(t1, t1, t2, a->sh); + + tcg_temp_free_i64(t2); + } + + set_avr64(a->vrt, t0, false); + set_avr64(a->vrt, t1, true); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + #define GEN_VAFORM_PAIRED(name0, name1, opc2) \ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ @@ -1559,6 +1793,86 @@ 
GEN_VXFORM3(vpermxor, 22, 0xFF) GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE, vpermxor, PPC_NONE, PPC2_ALTIVEC_207) +static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_CFUGED, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + +static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3i g = { + .fni8 = do_cntzdm, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, false, &g); + + return true; +} + +static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3i g = { + .fni8 = do_cntzdm, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, true, &g); + + return true; +} + +static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_PDEPD, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + +static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_PEXTD, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + #undef GEN_VR_LDX #undef GEN_VR_STX #undef GEN_VR_LVE diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc index f3f4855111..25ee715b43 100644 --- a/target/ppc/translate/vmx-ops.c.inc +++ b/target/ppc/translate/vmx-ops.c.inc @@ -225,13 +225,9 @@ GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000, GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000), -GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000), +GEN_VXFORM(vspltisb, 6, 12), +GEN_VXFORM(vspltish, 6, 13), +GEN_VXFORM(vspltisw, 6, 14), GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06), GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07), GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10), diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index 57a7f73bba..c0e38060b4 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -1,23 +1,13 @@ /*** VSX extension ***/ -static inline void get_cpu_vsrh(TCGv_i64 dst, int n) +static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high) { - tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, true)); + tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high)); } -static inline void get_cpu_vsrl(TCGv_i64 dst, int n) +static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high) { - tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, false)); -} - -static inline void set_cpu_vsrh(int n, TCGv_i64 src) -{ - tcg_gen_st_i64(src, cpu_env, 
vsr64_offset(n, true)); -} - -static inline void set_cpu_vsrl(int n, TCGv_i64 src) -{ - tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false)); + tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high)); } static inline TCGv_ptr gen_vsr_ptr(int reg) @@ -41,7 +31,7 @@ static void gen_##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##operation(ctx, t0, EA); \ - set_cpu_vsrh(xT(ctx->opcode), t0); \ + set_cpu_vsr(xT(ctx->opcode), t0, true); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(EA); \ tcg_temp_free_i64(t0); \ @@ -67,10 +57,10 @@ static void gen_lxvd2x(DisasContext *ctx) EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); gen_qemu_ld64_i64(ctx, t0, EA); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_gen_addi_tl(EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); - set_cpu_vsrl(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); tcg_temp_free(EA); tcg_temp_free_i64(t0); } @@ -109,8 +99,8 @@ static void gen_lxvw4x(DisasContext *ctx) tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ); } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -233,8 +223,8 @@ static void gen_lxvh8x(DisasContext *ctx) if (ctx->le_mode) { gen_bswap16x8(xth, xtl, xth, xtl); } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -258,121 +248,13 @@ static void gen_lxvb16x(DisasContext *ctx) tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); } -#define VSX_VECTOR_LOAD(name, op, indexed) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - int xt; \ - TCGv EA; \ - TCGv_i64 xth; \ - TCGv_i64 xtl; \ - \ - if (indexed) { \ - xt = xT(ctx->opcode); \ - } else { \ - xt = DQxT(ctx->opcode); \ - } \ - \ - if (xt < 32) { \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - } else { \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - } \ - xth = tcg_temp_new_i64(); \ - xtl = tcg_temp_new_i64(); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - if (indexed) { \ - gen_addr_reg_index(ctx, EA); \ - } else { \ - gen_addr_imm_index(ctx, EA, 0x0F); \ - } \ - if (ctx->le_mode) { \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \ - set_cpu_vsrl(xt, xtl); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \ - set_cpu_vsrh(xt, xth); \ - } else { \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \ - set_cpu_vsrh(xt, xth); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \ - set_cpu_vsrl(xt, xtl); \ - } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ - tcg_temp_free_i64(xtl); \ -} - -VSX_VECTOR_LOAD(lxv, ld_i64, 0) -VSX_VECTOR_LOAD(lxvx, ld_i64, 1) - -#define VSX_VECTOR_STORE(name, op, indexed) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - int xt; \ - TCGv 
EA; \ - TCGv_i64 xth; \ - TCGv_i64 xtl; \ - \ - if (indexed) { \ - xt = xT(ctx->opcode); \ - } else { \ - xt = DQxT(ctx->opcode); \ - } \ - \ - if (xt < 32) { \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - } else { \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - } \ - xth = tcg_temp_new_i64(); \ - xtl = tcg_temp_new_i64(); \ - get_cpu_vsrh(xth, xt); \ - get_cpu_vsrl(xtl, xt); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - if (indexed) { \ - gen_addr_reg_index(ctx, EA); \ - } else { \ - gen_addr_imm_index(ctx, EA, 0x0F); \ - } \ - if (ctx->le_mode) { \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \ - } else { \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \ - } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ - tcg_temp_free_i64(xtl); \ -} - -VSX_VECTOR_STORE(stxv, st_i64, 0) -VSX_VECTOR_STORE(stxvx, st_i64, 1) - #ifdef TARGET_PPC64 #define VSX_VECTOR_LOAD_STORE_LENGTH(name) \ static void gen_##name(DisasContext *ctx) \ @@ -421,7 +303,7 @@ static void gen_##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_imm_index(ctx, EA, 0x03); \ gen_qemu_##operation(ctx, xth, EA); \ - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); \ + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(EA); \ tcg_temp_free_i64(xth); \ @@ -443,7 +325,7 @@ static void gen_##name(DisasContext *ctx) \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ - get_cpu_vsrh(t0, xS(ctx->opcode)); \ + get_cpu_vsr(t0, xS(ctx->opcode), true); \ gen_qemu_##operation(ctx, t0, EA); \ tcg_temp_free(EA); \ tcg_temp_free_i64(t0); \ @@ -468,10 +350,10 @@ static void gen_stxvd2x(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); - get_cpu_vsrh(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), true); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(EA, EA, 8); - get_cpu_vsrl(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), false); gen_qemu_st64_i64(ctx, t0, EA); tcg_temp_free(EA); tcg_temp_free_i64(t0); @@ -489,8 +371,8 @@ static void gen_stxvw4x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -529,8 +411,8 @@ static void gen_stxvh8x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -566,8 +448,8 @@ static void gen_stxvb16x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -590,7 +472,7 @@ static void gen_##name(DisasContext *ctx) \ return; \ } 
\ xth = tcg_temp_new_i64(); \ - get_cpu_vsrh(xth, rD(ctx->opcode) + 32); \ + get_cpu_vsr(xth, rD(ctx->opcode) + 32, true); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_imm_index(ctx, EA, 0x03); \ @@ -618,7 +500,7 @@ static void gen_mfvsrwz(DisasContext *ctx) } TCGv_i64 tmp = tcg_temp_new_i64(); TCGv_i64 xsh = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); tcg_gen_ext32u_i64(tmp, xsh); tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp); tcg_temp_free_i64(tmp); @@ -642,7 +524,7 @@ static void gen_mtvsrwa(DisasContext *ctx) TCGv_i64 xsh = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_i64(xsh, tmp); - set_cpu_vsrh(xT(ctx->opcode), xsh); + set_cpu_vsr(xT(ctx->opcode), xsh, true); tcg_temp_free_i64(tmp); tcg_temp_free_i64(xsh); } @@ -664,7 +546,7 @@ static void gen_mtvsrwz(DisasContext *ctx) TCGv_i64 xsh = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32u_i64(xsh, tmp); - set_cpu_vsrh(xT(ctx->opcode), xsh); + set_cpu_vsr(xT(ctx->opcode), xsh, true); tcg_temp_free_i64(tmp); tcg_temp_free_i64(xsh); } @@ -685,7 +567,7 @@ static void gen_mfvsrd(DisasContext *ctx) } } t0 = tcg_temp_new_i64(); - get_cpu_vsrh(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), true); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(t0); } @@ -706,7 +588,7 @@ static void gen_mtvsrd(DisasContext *ctx) } t0 = tcg_temp_new_i64(); tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_temp_free_i64(t0); } @@ -725,7 +607,7 @@ static void gen_mfvsrld(DisasContext *ctx) } } t0 = tcg_temp_new_i64(); - get_cpu_vsrl(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), false); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(t0); } @@ -751,10 +633,10 @@ static void gen_mtvsrdd(DisasContext *ctx) } else { tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); } - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]); - set_cpu_vsrl(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); tcg_temp_free_i64(t0); } @@ -776,8 +658,8 @@ static void gen_mtvsrws(DisasContext *ctx) t0 = tcg_temp_new_i64(); tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32, 32); - set_cpu_vsrl(xT(ctx->opcode), t0); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_temp_free_i64(t0); } @@ -797,33 +679,25 @@ static void gen_xxpermdi(DisasContext *ctx) if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) || (xT(ctx->opcode) == xB(ctx->opcode)))) { - if ((DM(ctx->opcode) & 2) == 0) { - get_cpu_vsrh(xh, xA(ctx->opcode)); - } else { - get_cpu_vsrl(xh, xA(ctx->opcode)); - } - if ((DM(ctx->opcode) & 1) == 0) { - get_cpu_vsrh(xl, xB(ctx->opcode)); - } else { - get_cpu_vsrl(xl, xB(ctx->opcode)); - } + get_cpu_vsr(xh, xA(ctx->opcode), (DM(ctx->opcode) & 2) == 0); + get_cpu_vsr(xl, xB(ctx->opcode), (DM(ctx->opcode) & 1) == 0); - set_cpu_vsrh(xT(ctx->opcode), xh); - set_cpu_vsrl(xT(ctx->opcode), xl); + set_cpu_vsr(xT(ctx->opcode), xh, true); + set_cpu_vsr(xT(ctx->opcode), xl, false); } else { if ((DM(ctx->opcode) & 2) == 0) { - get_cpu_vsrh(xh, xA(ctx->opcode)); - set_cpu_vsrh(xT(ctx->opcode), xh); + get_cpu_vsr(xh, xA(ctx->opcode), true); + set_cpu_vsr(xT(ctx->opcode), xh, true); } else { - get_cpu_vsrl(xh, xA(ctx->opcode)); - 
set_cpu_vsrh(xT(ctx->opcode), xh); + get_cpu_vsr(xh, xA(ctx->opcode), false); + set_cpu_vsr(xT(ctx->opcode), xh, true); } if ((DM(ctx->opcode) & 1) == 0) { - get_cpu_vsrh(xl, xB(ctx->opcode)); - set_cpu_vsrl(xT(ctx->opcode), xl); + get_cpu_vsr(xl, xB(ctx->opcode), true); + set_cpu_vsr(xT(ctx->opcode), xl, false); } else { - get_cpu_vsrl(xl, xB(ctx->opcode)); - set_cpu_vsrl(xT(ctx->opcode), xl); + get_cpu_vsr(xl, xB(ctx->opcode), false); + set_cpu_vsr(xT(ctx->opcode), xl, false); } } tcg_temp_free_i64(xh); @@ -847,7 +721,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ xb = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ - get_cpu_vsrh(xb, xB(ctx->opcode)); \ + get_cpu_vsr(xb, xB(ctx->opcode), true); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: { \ @@ -864,7 +738,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ case OP_CPSGN: { \ TCGv_i64 xa = tcg_temp_new_i64(); \ - get_cpu_vsrh(xa, xA(ctx->opcode)); \ + get_cpu_vsr(xa, xA(ctx->opcode), true); \ tcg_gen_and_i64(xa, xa, sgm); \ tcg_gen_andc_i64(xb, xb, sgm); \ tcg_gen_or_i64(xb, xb, xa); \ @@ -872,7 +746,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ break; \ } \ } \ - set_cpu_vsrh(xT(ctx->opcode), xb); \ + set_cpu_vsr(xT(ctx->opcode), xb, true); \ tcg_temp_free_i64(xb); \ tcg_temp_free_i64(sgm); \ } @@ -898,8 +772,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ xbl = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ tmp = tcg_temp_new_i64(); \ - get_cpu_vsrh(xbh, xb); \ - get_cpu_vsrl(xbl, xb); \ + get_cpu_vsr(xbh, xb, true); \ + get_cpu_vsr(xbl, xb, false); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: \ @@ -914,15 +788,15 @@ static void glue(gen_, name)(DisasContext *ctx) \ case OP_CPSGN: \ xah = tcg_temp_new_i64(); \ xa = rA(ctx->opcode) + 32; \ - get_cpu_vsrh(tmp, xa); \ + get_cpu_vsr(tmp, xa, true); \ tcg_gen_and_i64(xah, tmp, sgm); \ tcg_gen_andc_i64(xbh, xbh, sgm); \ tcg_gen_or_i64(xbh, xbh, xah); \ tcg_temp_free_i64(xah); \ break; \ } \ - set_cpu_vsrh(xt, xbh); \ - set_cpu_vsrl(xt, xbl); \ + set_cpu_vsr(xt, xbh, true); \ + set_cpu_vsr(xt, xbl, false); \ tcg_temp_free_i64(xbl); \ tcg_temp_free_i64(xbh); \ tcg_temp_free_i64(sgm); \ @@ -945,8 +819,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ xbh = tcg_temp_new_i64(); \ xbl = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ - get_cpu_vsrh(xbh, xB(ctx->opcode)); \ - get_cpu_vsrl(xbl, xB(ctx->opcode)); \ + get_cpu_vsr(xbh, xB(ctx->opcode), true); \ + get_cpu_vsr(xbl, xB(ctx->opcode), false); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: { \ @@ -967,8 +841,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ case OP_CPSGN: { \ TCGv_i64 xah = tcg_temp_new_i64(); \ TCGv_i64 xal = tcg_temp_new_i64(); \ - get_cpu_vsrh(xah, xA(ctx->opcode)); \ - get_cpu_vsrl(xal, xA(ctx->opcode)); \ + get_cpu_vsr(xah, xA(ctx->opcode), true); \ + get_cpu_vsr(xal, xA(ctx->opcode), false); \ tcg_gen_and_i64(xah, xah, sgm); \ tcg_gen_and_i64(xal, xal, sgm); \ tcg_gen_andc_i64(xbh, xbh, sgm); \ @@ -980,8 +854,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ break; \ } \ } \ - set_cpu_vsrh(xT(ctx->opcode), xbh); \ - set_cpu_vsrl(xT(ctx->opcode), xbl); \ + set_cpu_vsr(xT(ctx->opcode), xbh, true); \ + set_cpu_vsr(xT(ctx->opcode), xbl, false); \ tcg_temp_free_i64(xbh); \ tcg_temp_free_i64(xbl); \ tcg_temp_free_i64(sgm); \ @@ -1193,9 +1067,9 @@ static void gen_##name(DisasContext *ctx) \ } \ t0 = tcg_temp_new_i64(); \ t1 = tcg_temp_new_i64(); \ - get_cpu_vsrh(t0, xB(ctx->opcode)); \ + get_cpu_vsr(t0, 
xB(ctx->opcode), true); \ gen_helper_##name(t1, cpu_env, t0); \ - set_cpu_vsrh(xT(ctx->opcode), t1); \ + set_cpu_vsr(xT(ctx->opcode), t1, true); \ tcg_temp_free_i64(t0); \ tcg_temp_free_i64(t1); \ } @@ -1390,13 +1264,13 @@ static void gen_xxbrd(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_bswap64_i64(xth, xbh); tcg_gen_bswap64_i64(xtl, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1419,12 +1293,12 @@ static void gen_xxbrh(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); gen_bswap16x8(xth, xtl, xbh, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1448,15 +1322,15 @@ static void gen_xxbrq(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); t0 = tcg_temp_new_i64(); tcg_gen_bswap64_i64(t0, xbl); tcg_gen_bswap64_i64(xtl, xbh); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_gen_mov_i64(xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1480,12 +1354,12 @@ static void gen_xxbrw(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); gen_bswap32x4(xth, xtl, xbh, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1527,23 +1401,16 @@ static void glue(gen_, name)(DisasContext *ctx) \ b0 = tcg_temp_new_i64(); \ b1 = tcg_temp_new_i64(); \ tmp = tcg_temp_new_i64(); \ - if (high) { \ - get_cpu_vsrh(a0, xA(ctx->opcode)); \ - get_cpu_vsrh(a1, xA(ctx->opcode)); \ - get_cpu_vsrh(b0, xB(ctx->opcode)); \ - get_cpu_vsrh(b1, xB(ctx->opcode)); \ - } else { \ - get_cpu_vsrl(a0, xA(ctx->opcode)); \ - get_cpu_vsrl(a1, xA(ctx->opcode)); \ - get_cpu_vsrl(b0, xB(ctx->opcode)); \ - get_cpu_vsrl(b1, xB(ctx->opcode)); \ - } \ + get_cpu_vsr(a0, xA(ctx->opcode), high); \ + get_cpu_vsr(a1, xA(ctx->opcode), high); \ + get_cpu_vsr(b0, xB(ctx->opcode), high); \ + get_cpu_vsr(b1, xB(ctx->opcode), high); \ tcg_gen_shri_i64(a0, a0, 32); \ tcg_gen_shri_i64(b0, b0, 32); \ tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \ - set_cpu_vsrh(xT(ctx->opcode), tmp); \ + set_cpu_vsr(xT(ctx->opcode), tmp, true); \ tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \ - set_cpu_vsrl(xT(ctx->opcode), tmp); \ + set_cpu_vsr(xT(ctx->opcode), tmp, false); \ tcg_temp_free_i64(a0); \ 
tcg_temp_free_i64(a1); \ tcg_temp_free_i64(b0); \ @@ -1569,47 +1436,114 @@ static void gen_xxsel(DisasContext *ctx) vsr_full_offset(rb), vsr_full_offset(ra), 16, 16); } -static void gen_xxspltw(DisasContext *ctx) +static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2 *a) { - int rt = xT(ctx->opcode); - int rb = xB(ctx->opcode); - int uim = UIM(ctx->opcode); int tofs, bofs; - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } + REQUIRE_VSX(ctx); - tofs = vsr_full_offset(rt); - bofs = vsr_full_offset(rb); - bofs += uim << MO_32; + tofs = vsr_full_offset(a->xt); + bofs = vsr_full_offset(a->xb); + bofs += a->uim << MO_32; #ifndef HOST_WORDS_BIG_ENDIAN bofs ^= 8 | 4; #endif tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16); + return true; } #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff)) -static void gen_xxspltib(DisasContext *ctx) +static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a) { - uint8_t uim8 = IMM8(ctx->opcode); - int rt = xT(ctx->opcode); + if (a->xt < 32) { + REQUIRE_VSX(ctx); + } else { + REQUIRE_VECTOR(ctx); + } + tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm); + return true; +} - if (rt < 32) { - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } +static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si); + + return true; +} + +static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16, + helper_todouble(a->si)); + return true; +} + +static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a) +{ + TCGv_i32 imm; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + imm = tcg_constant_i32(a->si); + + tcg_gen_st_i32(imm, cpu_env, + offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix))); + tcg_gen_st_i32(imm, cpu_env, + offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix))); + + return true; +} + +static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a) +{ + static const uint64_t values[32] = { + 0, /* Unspecified */ + 0x3FFF000000000000llu, /* QP +1.0 */ + 0x4000000000000000llu, /* QP +2.0 */ + 0x4000800000000000llu, /* QP +3.0 */ + 0x4001000000000000llu, /* QP +4.0 */ + 0x4001400000000000llu, /* QP +5.0 */ + 0x4001800000000000llu, /* QP +6.0 */ + 0x4001C00000000000llu, /* QP +7.0 */ + 0x7FFF000000000000llu, /* QP +Inf */ + 0x7FFF800000000000llu, /* QP dQNaN */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0x8000000000000000llu, /* QP -0.0 */ + 0xBFFF000000000000llu, /* QP -1.0 */ + 0xC000000000000000llu, /* QP -2.0 */ + 0xC000800000000000llu, /* QP -3.0 */ + 0xC001000000000000llu, /* QP -4.0 */ + 0xC001400000000000llu, /* QP -5.0 */ + 0xC001800000000000llu, /* QP -6.0 */ + 0xC001C00000000000llu, /* QP -7.0 */ + 0xFFFF000000000000llu, /* QP -Inf */ + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + if (values[a->uim]) { + set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false); + set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true); } else { - if (unlikely(!ctx->altivec_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VPU); - return; - } + gen_invalid(ctx); } - tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8); + + return true; } static void gen_xxsldwi(DisasContext *ctx) @@ -1624,40 +1558,40 
@@ static void gen_xxsldwi(DisasContext *ctx) switch (SHW(ctx->opcode)) { case 0: { - get_cpu_vsrh(xth, xA(ctx->opcode)); - get_cpu_vsrl(xtl, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), true); + get_cpu_vsr(xtl, xA(ctx->opcode), false); break; } case 1: { TCGv_i64 t0 = tcg_temp_new_i64(); - get_cpu_vsrh(xth, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), true); tcg_gen_shli_i64(xth, xth, 32); - get_cpu_vsrl(t0, xA(ctx->opcode)); + get_cpu_vsr(t0, xA(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xth, xth, t0); - get_cpu_vsrl(xtl, xA(ctx->opcode)); + get_cpu_vsr(xtl, xA(ctx->opcode), false); tcg_gen_shli_i64(xtl, xtl, 32); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); tcg_temp_free_i64(t0); break; } case 2: { - get_cpu_vsrl(xth, xA(ctx->opcode)); - get_cpu_vsrh(xtl, xB(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), false); + get_cpu_vsr(xtl, xB(ctx->opcode), true); break; } case 3: { TCGv_i64 t0 = tcg_temp_new_i64(); - get_cpu_vsrl(xth, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), false); tcg_gen_shli_i64(xth, xth, 32); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xth, xth, t0); - get_cpu_vsrh(xtl, xB(ctx->opcode)); + get_cpu_vsr(xtl, xB(ctx->opcode), true); tcg_gen_shli_i64(xtl, xtl, 32); - get_cpu_vsrl(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); tcg_temp_free_i64(t0); @@ -1665,8 +1599,8 @@ static void gen_xxsldwi(DisasContext *ctx) } } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1694,8 +1628,8 @@ static void gen_##name(DisasContext *ctx) \ */ \ if (uimm > 15) { \ tcg_gen_movi_i64(t1, 0); \ - set_cpu_vsrh(xT(ctx->opcode), t1); \ - set_cpu_vsrl(xT(ctx->opcode), t1); \ + set_cpu_vsr(xT(ctx->opcode), t1, true); \ + set_cpu_vsr(xT(ctx->opcode), t1, false); \ return; \ } \ tcg_gen_movi_i32(t0, uimm); \ @@ -1719,7 +1653,7 @@ static void gen_xsxexpdp(DisasContext *ctx) return; } t0 = tcg_temp_new_i64(); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_extract_i64(rt, t0, 52, 11); tcg_temp_free_i64(t0); } @@ -1737,12 +1671,12 @@ static void gen_xsxexpqp(DisasContext *ctx) xth = tcg_temp_new_i64(); xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); tcg_gen_extract_i64(xth, xbh, 48, 15); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_movi_i64(xtl, 0); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(xbh); tcg_temp_free_i64(xth); @@ -1766,7 +1700,7 @@ static void gen_xsiexpdp(DisasContext *ctx) tcg_gen_andi_i64(t0, rb, 0x7FF); tcg_gen_shli_i64(t0, t0, 52); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); /* dword[1] is undefined */ tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1789,19 +1723,19 @@ static void gen_xsiexpqp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xah = tcg_temp_new_i64(); xal = tcg_temp_new_i64(); - get_cpu_vsrh(xah, rA(ctx->opcode) + 32); - get_cpu_vsrl(xal, rA(ctx->opcode) + 32); + get_cpu_vsr(xah, 
rA(ctx->opcode) + 32, true); + get_cpu_vsr(xal, rA(ctx->opcode) + 32, false); xbh = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF); tcg_gen_andi_i64(t0, xbh, 0x7FFF); tcg_gen_shli_i64(t0, t0, 48); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xal); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1826,12 +1760,12 @@ static void gen_xsxsigdp(DisasContext *ctx) zr = tcg_const_i64(0); nan = tcg_const_i64(2047); - get_cpu_vsrh(t1, xB(ctx->opcode)); + get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_extract_i64(exp, t1, 52, 11); tcg_gen_movi_i64(t0, 0x0010000000000000); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); - get_cpu_vsrh(t1, xB(ctx->opcode)); + get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_deposit_i64(rt, t0, t1, 0, 52); tcg_temp_free_i64(t0); @@ -1857,8 +1791,8 @@ static void gen_xsxsigqp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); - get_cpu_vsrl(xbl, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); + get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); zr = tcg_const_i64(0); @@ -1869,9 +1803,9 @@ static void gen_xsxsigqp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xth, t0, xbh, 0, 48); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xbl); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(exp); @@ -1904,22 +1838,22 @@ static void gen_xviexpsp(DisasContext *ctx) xal = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xah, xA(ctx->opcode)); - get_cpu_vsrl(xal, xA(ctx->opcode)); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xah, xA(ctx->opcode), true); + get_cpu_vsr(xal, xA(ctx->opcode), false); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF); tcg_gen_andi_i64(t0, xbh, 0xFF000000FF); tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF); tcg_gen_andi_i64(t0, xbl, 0xFF000000FF); tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xtl, xtl, t0); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1949,16 +1883,16 @@ static void gen_xviexpdp(DisasContext *ctx) xal = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xah, xA(ctx->opcode)); - get_cpu_vsrl(xal, xA(ctx->opcode)); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xah, xA(ctx->opcode), true); + get_cpu_vsr(xal, xA(ctx->opcode), false); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_deposit_i64(xth, xah, xbh, 52, 11); - 
set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1983,15 +1917,15 @@ static void gen_xvxexpsp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_shri_i64(xth, xbh, 23); tcg_gen_andi_i64(xth, xth, 0xFF000000FF); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_shri_i64(xtl, xbl, 23); tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -2014,13 +1948,13 @@ static void gen_xvxexpdp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_extract_i64(xth, xbh, 52, 11); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(xtl, xbl, 52, 11); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -2046,8 +1980,8 @@ static void gen_xvxsigdp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); zr = tcg_const_i64(0); @@ -2058,14 +1992,14 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xth, t0, xbh, 0, 52); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(exp, xbl, 52, 11); tcg_gen_movi_i64(t0, 0x0010000000000000); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(exp); @@ -2077,6 +2011,180 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_temp_free_i64(xbl); } +static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, + int rt, bool store, bool paired) +{ + TCGv ea; + TCGv_i64 xt; + MemOp mop; + int rt1, rt2; + + xt = tcg_temp_new_i64(); + + mop = DEF_MEMOP(MO_Q); + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, ra, displ); + + if (paired && ctx->le_mode) { + rt1 = rt + 1; + rt2 = rt; + } else { + rt1 = rt; + rt2 = rt + 1; + } + + if (store) { + get_cpu_vsr(xt, rt1, !ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt1, ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + if (paired) { + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt2, !ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt2, ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + } + } else { + 
tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt1, xt, !ctx->le_mode); + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt1, xt, ctx->le_mode); + if (paired) { + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt2, xt, !ctx->le_mode); + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt2, xt, ctx->le_mode); + } + } + + tcg_temp_free(ea); + tcg_temp_free_i64(xt); + return true; +} + +static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired) +{ + if (paired) { + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + } else { + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + } + + if (paired || a->rt >= 32) { + REQUIRE_VSX(ctx); + } else { + REQUIRE_VECTOR(ctx); + } + + return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired); +} + +static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a, + bool store, bool paired) +{ + arg_D d; + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired); +} + +static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired) +{ + if (paired) { + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + } else { + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + } + + if (paired || a->rt >= 32) { + REQUIRE_VSX(ctx); + } else { + REQUIRE_VECTOR(ctx); + } + + return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired); +} + +TRANS(STXV, do_lstxv_D, true, false) +TRANS(LXV, do_lstxv_D, false, false) +TRANS(STXVP, do_lstxv_D, true, true) +TRANS(LXVP, do_lstxv_D, false, true) +TRANS(STXVX, do_lstxv_X, true, false) +TRANS(LXVX, do_lstxv_X, false, false) +TRANS(STXVPX, do_lstxv_X, true, true) +TRANS(LXVPX, do_lstxv_X, false, true) +TRANS64(PSTXV, do_lstxv_PLS_D, true, false) +TRANS64(PLXV, do_lstxv_PLS_D, false, false) +TRANS64(PSTXVP, do_lstxv_PLS_D, true, true) +TRANS64(PLXVP, do_lstxv_PLS_D, false, true) + +static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + TCGv_vec c) +{ + TCGv_vec tmp = tcg_temp_new_vec_matching(c); + tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1); + tcg_gen_bitsel_vec(vece, t, tmp, b, a); + tcg_temp_free_vec(tmp); +} + +static bool do_xxblendv(DisasContext *ctx, arg_XX4 *a, unsigned vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, 0 + }; + static const GVecGen4 ops[4] = { + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVB, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVH, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVW, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVD, + .opt_opc = vecop_list, + .vece = MO_64 + } + }; + + REQUIRE_VSX(ctx); + + tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa), + vsr_full_offset(a->xb), vsr_full_offset(a->xc), + 16, 16, &ops[vece]); + + return true; +} + +TRANS(XXBLENDVB, do_xxblendv, MO_8) +TRANS(XXBLENDVH, do_xxblendv, MO_16) +TRANS(XXBLENDVW, do_xxblendv, MO_32) +TRANS(XXBLENDVD, do_xxblendv, MO_64) + #undef GEN_XX2FORM #undef GEN_XX3FORM #undef GEN_XX2IFORM diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc index 1d41beef26..152d1e5c3b 100644 --- a/target/ppc/translate/vsx-ops.c.inc +++ b/target/ppc/translate/vsx-ops.c.inc @@ -10,7 +10,6 @@ 
GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(lxvx, 0x1F, 0x0C, 0x08, 0x00000040, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300), @@ -25,7 +24,6 @@ GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(stxvx, 0x1F, 0x0C, 0x0C, 0, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300), @@ -350,8 +348,6 @@ GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX), GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX), GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300), GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300), -GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX), -GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300), GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00), GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300), GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300), diff --git a/tests/qtest/virtio-net-test.c b/tests/qtest/virtio-net-test.c index a08e2ffe12..8bf74e516c 100644 --- a/tests/qtest/virtio-net-test.c +++ b/tests/qtest/virtio-net-test.c @@ -319,7 +319,7 @@ static void register_virtio_net_test(void) .before = virtio_net_test_setup, }; - qos_add_test("hotplug", "virtio-pci", hotplug, &opts); + qos_add_test("hotplug", "virtio-net-pci", hotplug, &opts); #ifndef _WIN32 qos_add_test("basic", "virtio-net", send_recv_test, &opts); qos_add_test("rx_stop_cont", "virtio-net", stop_cont_test, &opts);