Diffstat (limited to 'docs')
 -rw-r--r--  docs/mach-virt-graphical.cfg   | 281
 -rw-r--r--  docs/mach-virt-serial.cfg      | 243
 -rw-r--r--  docs/migration.txt             |  71
 -rw-r--r--  docs/q35-chipset.cfg           | 152
 -rw-r--r--  docs/q35-emulated.cfg          | 288
 -rw-r--r--  docs/q35-virtio-graphical.cfg  | 248
 -rw-r--r--  docs/q35-virtio-serial.cfg     | 193
 -rw-r--r--  docs/replay.txt                |   7
 8 files changed, 1331 insertions, 152 deletions
diff --git a/docs/mach-virt-graphical.cfg b/docs/mach-virt-graphical.cfg
new file mode 100644
index 0000000000..0fdf6846dd
--- /dev/null
+++ b/docs/mach-virt-graphical.cfg
@@ -0,0 +1,281 @@
+# mach-virt - VirtIO guest (graphical console)
+# =========================================================
+#
+# Usage:
+#
+# $ qemu-system-aarch64 \
+# -nodefaults \
+# -readconfig mach-virt-graphical.cfg \
+# -cpu host
+#
+# You will probably need to tweak the lines marked as
+# CHANGE ME before being able to use this configuration!
+#
+# The guest will have a selection of VirtIO devices
+# tailored towards optimal performance with modern guests,
+# and will be accessed through a graphical console.
+#
+# ---------------------------------------------------------
+#
+# Using -nodefaults is required to have full control over
+# the virtual hardware: when it's specified, QEMU will
+# populate the board with only the builtin peripherals,
+# such as the PL011 UART, plus a PCI Express Root Bus; the
+# user will then have to explicitly add further devices.
+#
+# The PCI Express Root Bus shows up in the guest as:
+#
+# 00:00.0 Host bridge
+#
+# This configuration file adds a number of other useful
+# devices, more specifically:
+#
+# 00:01.0 Display controller
+# 00:1c.* PCI bridge (PCI Express Root Ports)
+# 01:00.0 SCSI storage controller
+# 02:00.0 Ethernet controller
+# 03:00.0 USB controller
+#
+# More information about these devices is available below.
+
+
+# Machine options
+# =========================================================
+#
+# We use the virt machine type and enable KVM acceleration
+# for better performance.
+#
+# Using less than 1 GiB of memory is probably not going to
+# yield good performance in the guest, and might even lead
+# to obscure boot issues in some cases.
+#
+# Unfortunately, there is no way to configure the CPU model
+# in this file, so it will have to be provided on the
+# command line, but we can configure the guest to use the
+# same GIC version as the host.
+
+[machine]
+ type = "virt"
+ accel = "kvm"
+ gic-version = "host"
+
+[memory]
+ size = "1024"
+
+
+# Firmware configuration
+# =========================================================
+#
+# There are two parts to the firmware: a read-only image
+# containing the executable code, which is shared between
+# guests, and a read/write variable store that is owned
+# by one specific guest, exclusively, and is used to
+# record information such as the UEFI boot order.
+#
+# For any new guest, its permanent, private variable store
+# should initially be copied from the template file
+# provided along with the firmware binary.
+#
+# Depending on the OS distribution you're using on the
+# host, the name of the package containing the firmware
+# binary and variable store template, as well as the paths
+# to the files themselves, will be different. For example:
+#
+# Fedora
+# edk2-aarch64 (pkg)
+# /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw (bin)
+# /usr/share/edk2/aarch64/vars-template-pflash.raw (var)
+#
+# RHEL
+# AAVMF (pkg)
+# /usr/share/AAVMF/AAVMF_CODE.fd (bin)
+# /usr/share/AAVMF/AAVMF_VARS.fd (var)
+#
+# Debian/Ubuntu
+# qemu-efi (pkg)
+# /usr/share/AAVMF/AAVMF_CODE.fd (bin)
+# /usr/share/AAVMF/AAVMF_VARS.fd (var)
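+#
+# For example, on Debian/Ubuntu the private variable store for
+# this guest could be created, before its first boot, with
+# something like
+#
+#   $ cp /usr/share/AAVMF/AAVMF_VARS.fd guest_VARS.fd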
+
+[drive "uefi-binary"]
+ file = "/usr/share/AAVMF/AAVMF_CODE.fd" # CHANGE ME
+ format = "raw"
+ if = "pflash"
+ unit = "0"
+ readonly = "on"
+
+[drive "uefi-varstore"]
+ file = "guest_VARS.fd" # CHANGE ME
+ format = "raw"
+ if = "pflash"
+ unit = "1"
+
+
+# PCI bridge (PCI Express Root Ports)
+# =========================================================
+#
+# We create eight PCI Express Root Ports, and we plug them
+# all into separate functions of the same slot. Some of
+# them will be used by devices, the rest will remain
+# available for hotplug.
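+#
+# As a purely hypothetical example, one of the free Root Ports
+# could later be used to hot-plug an extra controller from the
+# QEMU monitor with something like
+#
+#   (qemu) device_add virtio-scsi-pci,id=scsi2,bus=pcie.5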
+
+[device "pcie.1"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.0"
+ port = "1"
+ chassis = "1"
+ multifunction = "on"
+
+[device "pcie.2"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.1"
+ port = "2"
+ chassis = "2"
+
+[device "pcie.3"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.2"
+ port = "3"
+ chassis = "3"
+
+[device "pcie.4"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.3"
+ port = "4"
+ chassis = "4"
+
+[device "pcie.5"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.4"
+ port = "5"
+ chassis = "5"
+
+[device "pcie.6"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.5"
+ port = "6"
+ chassis = "6"
+
+[device "pcie.7"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.6"
+ port = "7"
+ chassis = "7"
+
+[device "pcie.8"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.7"
+ port = "8"
+ chassis = "8"
+
+
+# SCSI storage controller (and storage)
+# =========================================================
+#
+# We use virtio-scsi here so that we can (hot)plug a large
+# number of disks without running into issues; a SCSI disk,
+# backed by a qcow2 disk image on the host's filesystem, is
+# attached to it.
+#
+# We also create an optical disk, mostly for installation
+# purposes: once the guest OS has been successfully
+# installed, the guest will no longer boot from optical
+# media. If you don't want, or no longer want, to have an
+# optical disk in the guest you can safely comment out
+# all relevant sections below.
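+#
+# If you don't have a disk image yet, an empty one could be
+# created with something like (the size is just an example)
+#
+#   $ qemu-img create -f qcow2 guest.qcow2 20G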
+
+[device "scsi"]
+ driver = "virtio-scsi-pci"
+ bus = "pcie.1"
+ addr = "00.0"
+
+[device "scsi-disk"]
+ driver = "scsi-hd"
+ bus = "scsi.0"
+ drive = "disk"
+ bootindex = "1"
+
+[drive "disk"]
+ file = "guest.qcow2" # CHANGE ME
+ format = "qcow2"
+ if = "none"
+
+[device "scsi-optical-disk"]
+ driver = "scsi-cd"
+ bus = "scsi.0"
+ drive = "optical-disk"
+ bootindex = "2"
+
+[drive "optical-disk"]
+ file = "install.iso" # CHANGE ME
+ format = "raw"
+ if = "none"
+
+
+# Ethernet controller
+# =========================================================
+#
+# We use virtio-net for improved performance over emulated
+# hardware; on the host side, we take advantage of user
+# networking so that the QEMU process doesn't require any
+# additional privileges.
+
+[netdev "hostnet"]
+ type = "user"
+
+[device "net"]
+ driver = "virtio-net-pci"
+ netdev = "hostnet"
+ bus = "pcie.2"
+ addr = "00.0"
+
+
+# USB controller (and input devices)
+# =========================================================
+#
+# We add a virtualization-friendly USB 3.0 controller and
+# a USB keyboard / USB tablet combo so that graphical
+# guests can be controlled appropriately.
+
+[device "usb"]
+ driver = "nec-usb-xhci"
+ bus = "pcie.3"
+ addr = "00.0"
+
+[device "keyboard"]
+ driver = "usb-kbd"
+ bus = "usb.0"
+
+[device "tablet"]
+ driver = "usb-tablet"
+ bus = "usb.0"
+
+
+# Display controller
+# =========================================================
+#
+# We use virtio-gpu because the legacy VGA framebuffer is
+# very troublesome on aarch64, and virtio-gpu is the only
+# video device that doesn't implement it.
+#
+# If you're running the guest on a remote, potentially
+# headless host, you will probably want to append something
+# like
+#
+# -display vnc=127.0.0.1:0
+#
+# to the command line in order to prevent QEMU from
+# creating a graphical display window on the host and
+# enable remote access instead.
+
+[device "video"]
+ driver = "virtio-gpu"
+ bus = "pcie.0"
+ addr = "01.0"
diff --git a/docs/mach-virt-serial.cfg b/docs/mach-virt-serial.cfg
new file mode 100644
index 0000000000..aee9f1c5a1
--- /dev/null
+++ b/docs/mach-virt-serial.cfg
@@ -0,0 +1,243 @@
+# mach-virt - VirtIO guest (serial console)
+# =========================================================
+#
+# Usage:
+#
+# $ qemu-system-aarch64 \
+# -nodefaults \
+# -readconfig mach-virt-serial.cfg \
+# -display none -serial mon:stdio \
+# -cpu host
+#
+# You will probably need to tweak the lines marked as
+# CHANGE ME before being able to use this configuration!
+#
+# The guest will have a selection of VirtIO devices
+# tailored towards optimal performance with modern guests,
+# and will be accessed through the serial console.
+#
+# ---------------------------------------------------------
+#
+# Using -nodefaults is required to have full control over
+# the virtual hardware: when it's specified, QEMU will
+# populate the board with only the builtin peripherals,
+# such as the PL011 UART, plus a PCI Express Root Bus; the
+# user will then have to explicitly add further devices.
+#
+# The PCI Express Root Bus shows up in the guest as:
+#
+# 00:00.0 Host bridge
+#
+# This configuration file adds a number of other useful
+# devices, more specifically:
+#
+# 00:1c.* PCI bridge (PCI Express Root Ports)
+# 01:00.0 SCSI storage controller
+# 02:00.0 Ethernet controller
+#
+# More information about these devices is available below.
+#
+# We use '-display none' to prevent QEMU from creating a
+# graphical display window, which would serve no purpose in
+# this specific configuration, and '-serial mon:stdio' to
+# multiplex the guest's serial console and the QEMU monitor
+# to the host's stdio; use 'Ctrl+A h' to learn how to
+# switch between the two and more.
+
+
+# Machine options
+# =========================================================
+#
+# We use the virt machine type and enable KVM acceleration
+# for better performance.
+#
+# Using less than 1 GiB of memory is probably not going to
+# yield good performance in the guest, and might even lead
+# to obscure boot issues in some cases.
+#
+# Unfortunately, there is no way to configure the CPU model
+# in this file, so it will have to be provided on the
+# command line, but we can configure the guest to use the
+# same GIC version as the host.
+
+[machine]
+ type = "virt"
+ accel = "kvm"
+ gic-version = "host"
+
+[memory]
+ size = "1024"
+
+
+# Firmware configuration
+# =========================================================
+#
+# There are two parts to the firmware: a read-only image
+# containing the executable code, which is shared between
+# guests, and a read/write variable store that is owned
+# by one specific guest, exclusively, and is used to
+# record information such as the UEFI boot order.
+#
+# For any new guest, its permanent, private variable store
+# should initially be copied from the template file
+# provided along with the firmware binary.
+#
+# Depending on the OS distribution you're using on the
+# host, the name of the package containing the firmware
+# binary and variable store template, as well as the paths
+# to the files themselves, will be different. For example:
+#
+# Fedora
+# edk2-aarch64 (pkg)
+# /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw (bin)
+# /usr/share/edk2/aarch64/vars-template-pflash.raw (var)
+#
+# RHEL
+# AAVMF (pkg)
+# /usr/share/AAVMF/AAVMF_CODE.fd (bin)
+# /usr/share/AAVMF/AAVMF_VARS.fd (var)
+#
+# Debian/Ubuntu
+# qemu-efi (pkg)
+# /usr/share/AAVMF/AAVMF_CODE.fd (bin)
+# /usr/share/AAVMF/AAVMF_VARS.fd (var)
+
+[drive "uefi-binary"]
+ file = "/usr/share/AAVMF/AAVMF_CODE.fd" # CHANGE ME
+ format = "raw"
+ if = "pflash"
+ unit = "0"
+ readonly = "on"
+
+[drive "uefi-varstore"]
+ file = "guest_VARS.fd" # CHANGE ME
+ format = "raw"
+ if = "pflash"
+ unit = "1"
+
+
+# PCI bridge (PCI Express Root Ports)
+# =========================================================
+#
+# We create eight PCI Express Root Ports, and we plug them
+# all into separate functions of the same slot. Some of
+# them will be used by devices, the rest will remain
+# available for hotplug.
+
+[device "pcie.1"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.0"
+ port = "1"
+ chassis = "1"
+ multifunction = "on"
+
+[device "pcie.2"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.1"
+ port = "2"
+ chassis = "2"
+
+[device "pcie.3"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.2"
+ port = "3"
+ chassis = "3"
+
+[device "pcie.4"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.3"
+ port = "4"
+ chassis = "4"
+
+[device "pcie.5"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.4"
+ port = "5"
+ chassis = "5"
+
+[device "pcie.6"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.5"
+ port = "6"
+ chassis = "6"
+
+[device "pcie.7"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.6"
+ port = "7"
+ chassis = "7"
+
+[device "pcie.8"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.7"
+ port = "8"
+ chassis = "8"
+
+
+# SCSI storage controller (and storage)
+# =========================================================
+#
+# We use virtio-scsi here so that we can (hot)plug a large
+# number of disks without running into issues; a SCSI disk,
+# backed by a qcow2 disk image on the host's filesystem, is
+# attached to it.
+#
+# We also create an optical disk, mostly for installation
+# purposes: once the guest OS has been successfully
+# installed, the guest will no longer boot from optical
+# media. If you don't want, or no longer want, to have an
+# optical disk in the guest you can safely comment out
+# all relevant sections below.
+
+[device "scsi"]
+ driver = "virtio-scsi-pci"
+ bus = "pcie.1"
+ addr = "00.0"
+
+[device "scsi-disk"]
+ driver = "scsi-hd"
+ bus = "scsi.0"
+ drive = "disk"
+ bootindex = "1"
+
+[drive "disk"]
+ file = "guest.qcow2" # CHANGE ME
+ format = "qcow2"
+ if = "none"
+
+[device "scsi-optical-disk"]
+ driver = "scsi-cd"
+ bus = "scsi.0"
+ drive = "optical-disk"
+ bootindex = "2"
+
+[drive "optical-disk"]
+ file = "install.iso" # CHANGE ME
+ format = "raw"
+ if = "none"
+
+
+# Ethernet controller
+# =========================================================
+#
+# We use virtio-net for improved performance over emulated
+# hardware; on the host side, we take advantage of user
+# networking so that the QEMU process doesn't require any
+# additional privileges.
+
+[netdev "hostnet"]
+ type = "user"
+
+[device "net"]
+ driver = "virtio-net-pci"
+ netdev = "hostnet"
+ bus = "pcie.2"
+ addr = "00.0"
diff --git a/docs/migration.txt b/docs/migration.txt
index 6503c17685..1b940a829b 100644
--- a/docs/migration.txt
+++ b/docs/migration.txt
@@ -161,6 +161,11 @@ include/hw/hw.h.
=== More about versions ===
+Version numbers are intended for major incompatible changes to the
+migration of a device, and using them breaks backwards-migration
+compatibility; in general most changes can be made by adding Subsections
+(see below) or _TEST macros (see below) which won't break compatibility.
+
You can see that there are several version fields:
- version_id: the maximum version_id supported by VMState for that device.
@@ -175,6 +180,9 @@ version_id. And the function load_state_old() (if present) is able to
load state from minimum_version_id_old to minimum_version_id. This
function is deprecated and will be removed when no more users are left.
+Saving state will always create a section with the 'version_id' value
+and thus can't be loaded by any older QEMU.
+
=== Massaging functions ===
Sometimes, it is not enough to be able to save the state directly
@@ -292,6 +300,56 @@ save/send this state when we are in the middle of a pio operation
not enabled, the values on that fields are garbage and don't need to
be sent.
+Using a condition function that checks a 'property' to determine whether
+to send a subsection allows backwards migration compatibility when
+new subsections are added.
+
+For example:
+  a) Add a new property using DEFINE_PROP_BOOL - e.g. support-foo - and
+     default it to true.
+  b) Add an entry to the HW_COMPAT_ macro for the previous version that
+     sets the property to false.
+  c) Add a static bool support_foo function that tests the property.
+  d) Add a subsection with a .needed set to the support_foo function.
+  e) (potentially) Add a pre_load that sets up a default value for 'foo'
+     to be used if the subsection isn't loaded.
+
+With this in place, the subsection will not be generated when using an
+older machine type, and the migration stream will be accepted by older
+QEMU versions. pre_load functions can be used to initialise state on
+the newer version so that the fields default to suitable values when
+loading streams created by older QEMU versions that do not generate
+the subsection.
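+
+As a rough sketch - the device, field and property names below are
+made up rather than taken from a real device - the pieces fit
+together like this:
+
+static bool support_foo_needed(void *opaque)
+{
+    MyDeviceState *s = opaque;
+
+    /* false for machine types older than the one that added 'foo' */
+    return s->support_foo;
+}
+
+static const VMStateDescription vmstate_mydevice_foo = {
+    .name = "mydevice/foo",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = support_foo_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(foo, MyDeviceState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+The subsection then just needs to be listed in the .subsections array
+of the device's main VMStateDescription.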
+
+In some cases subsections are added for data that had been accidentally
+omitted by earlier versions; if the missing data causes the migration
+process to succeed but the guest to behave badly then it may be better
+to send the subsection and cause the migration to explicitly fail
+with the unknown subsection error. If the bad behaviour only happens
+with certain data values, making the subsection conditional on
+the data value (rather than the machine type) allows migrations to succeed
+in most cases. In general the preference is to tie the subsection to
+the machine type, and allow reliable migrations, unless the behaviour
+from omission of the subsection is really bad.
+
+= Not sending existing elements =
+
+Sometimes members of the VMState are no longer needed:
+ - removing them will break migration compatibility;
+ - making them version dependent and bumping the version will break
+   backwards migration compatibility.
+
+The best way is to:
+  a) Add a new property/compatibility/function in the same way as for
+     subsections above.
+  b) Replace the VMSTATE macro with the _TEST version of the macro, e.g.:
+ VMSTATE_UINT32(foo, barstruct)
+ becomes
+ VMSTATE_UINT32_TEST(foo, barstruct, pre_version_baz)
+
+Sometime in the future, when we no longer care about the ancient
+versions, these can be killed off.
+
= Return path =
In most migration scenarios there is only a single data path that runs
@@ -482,3 +540,16 @@ request for a page that has already been sent is ignored. Duplicate requests
such as this can happen as a page is sent at about the same time the
destination accesses it.
+=== Postcopy with hugepages ===
+
+Postcopy now works with hugetlbfs backed memory:
+ a) The linux kernel on the destination must support userfault on hugepages.
+ b) The huge-page configuration on the source and destination VMs must be
+ identical; i.e. RAMBlocks on both sides must use the same page size.
+ c) Note that -mem-path /dev/hugepages will fall back to allocating normal
+    RAM if it doesn't have enough hugepages, which would violate requirement
+    (b); using -mem-prealloc enforces the allocation using hugepages (see
+    the example command line below).
+ d) Care should be taken with the size of hugepage used; postcopy with 2MB
+ hugepages works well, however 1GB hugepages are likely to be problematic
+ since it takes ~1 second to transfer a 1GB hugepage across a 10Gbps link,
+ and until the full page is transferred the destination thread is blocked.
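+
+For example, the memory setup on both the source and the destination
+could look something like this (the size and path are only
+illustrative):
+
+  qemu-system-x86_64 -m 4G -mem-path /dev/hugepages -mem-prealloc ...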
diff --git a/docs/q35-chipset.cfg b/docs/q35-chipset.cfg
deleted file mode 100644
index e4ddb7d9cc..0000000000
--- a/docs/q35-chipset.cfg
+++ /dev/null
@@ -1,152 +0,0 @@
-################################################################
-#
-# qemu -M q35 creates a bare machine with just the very essential
-# chipset devices being present:
-#
-# 00.0 - Host bridge
-# 1f.0 - ISA bridge / LPC
-# 1f.2 - SATA (AHCI) controller
-# 1f.3 - SMBus controller
-#
-# This config file documents the other devices and how they are
-# created. You can simply use "-readconfig $thisfile" to create
-# them all. Here is a overview:
-#
-# 19.0 - Ethernet controller (not created, our e1000 emulation
-# doesn't emulate the ich9 device).
-# 1a.* - USB Controller #2 (ehci + uhci companions)
-# 1b.0 - HD Audio Controller
-# 1c.* - PCI Express Ports
-# 1d.* - USB Controller #1 (ehci + uhci companions,
-# "qemu -M q35 -usb" creates these too)
-# 1e.0 - PCI Bridge
-#
-
-[device "ich9-ehci-2"]
- driver = "ich9-usb-ehci2"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1a.7"
-
-[device "ich9-uhci-4"]
- driver = "ich9-usb-uhci4"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1a.0"
- masterbus = "ich9-ehci-2.0"
- firstport = "0"
-
-[device "ich9-uhci-5"]
- driver = "ich9-usb-uhci5"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1a.1"
- masterbus = "ich9-ehci-2.0"
- firstport = "2"
-
-[device "ich9-uhci-6"]
- driver = "ich9-usb-uhci6"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1a.2"
- masterbus = "ich9-ehci-2.0"
- firstport = "4"
-
-
-[device "ich9-hda-audio"]
- driver = "ich9-intel-hda"
- bus = "pcie.0"
- addr = "1b.0"
-
-
-[device "ich9-pcie-port-1"]
- driver = "ioh3420"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1c.0"
- port = "1"
- chassis = "1"
-
-[device "ich9-pcie-port-2"]
- driver = "ioh3420"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1c.1"
- port = "2"
- chassis = "2"
-
-[device "ich9-pcie-port-3"]
- driver = "ioh3420"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1c.2"
- port = "3"
- chassis = "3"
-
-[device "ich9-pcie-port-4"]
- driver = "ioh3420"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1c.3"
- port = "4"
- chassis = "4"
-
-##
-# Example PCIe switch with two downstream ports
-#
-#[device "pcie-switch-upstream-port-1"]
-# driver = "x3130-upstream"
-# bus = "ich9-pcie-port-4"
-# addr = "00.0"
-#
-#[device "pcie-switch-downstream-port-1-1"]
-# driver = "xio3130-downstream"
-# multifunction = "on"
-# bus = "pcie-switch-upstream-port-1"
-# addr = "00.0"
-# port = "1"
-# chassis = "5"
-#
-#[device "pcie-switch-downstream-port-1-2"]
-# driver = "xio3130-downstream"
-# multifunction = "on"
-# bus = "pcie-switch-upstream-port-1"
-# addr = "00.1"
-# port = "1"
-# chassis = "6"
-
-[device "ich9-ehci-1"]
- driver = "ich9-usb-ehci1"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1d.7"
-
-[device "ich9-uhci-1"]
- driver = "ich9-usb-uhci1"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1d.0"
- masterbus = "ich9-ehci-1.0"
- firstport = "0"
-
-[device "ich9-uhci-2"]
- driver = "ich9-usb-uhci2"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1d.1"
- masterbus = "ich9-ehci-1.0"
- firstport = "2"
-
-[device "ich9-uhci-3"]
- driver = "ich9-usb-uhci3"
- multifunction = "on"
- bus = "pcie.0"
- addr = "1d.2"
- masterbus = "ich9-ehci-1.0"
- firstport = "4"
-
-
-[device "ich9-pci-bridge"]
- driver = "i82801b11-bridge"
- bus = "pcie.0"
- addr = "1e.0"
diff --git a/docs/q35-emulated.cfg b/docs/q35-emulated.cfg
new file mode 100644
index 0000000000..c6416d6545
--- /dev/null
+++ b/docs/q35-emulated.cfg
@@ -0,0 +1,288 @@
+# q35 - Emulated guest (graphical console)
+# =========================================================
+#
+# Usage:
+#
+# $ qemu-system-x86_64 \
+# -nodefaults \
+# -readconfig q35-emulated.cfg
+#
+# You will probably need to tweak the lines marked as
+# CHANGE ME before being able to use this configuration!
+#
+# The guest will have a selection of emulated devices that
+# closely resembles that of a physical machine, and will be
+# accessed through a graphical console.
+#
+# ---------------------------------------------------------
+#
+# Using -nodefaults is required to have full control over
+# the virtual hardware: when it's specified, QEMU will
+# populate the board with only the builtin peripherals
+# plus a small selection of core PCI devices and
+# controllers; the user will then have to explicitly add
+# further devices.
+#
+# The core PCI devices show up in the guest as:
+#
+# 00:00.0 Host bridge
+# 00:1f.0 ISA bridge / LPC
+# 00:1f.2 SATA (AHCI) controller
+# 00:1f.3 SMBus controller
+#
+# This configuration file adds a number of devices that
+# are pretty much guaranteed to be present in every single
+# physical machine based on q35, more specifically:
+#
+# 00:01.0 VGA compatible controller
+# 00:19.0 Ethernet controller
+# 00:1a.* USB controller (#2)
+# 00:1b.0 Audio device
+# 00:1c.* PCI bridge (PCI Express Root Ports)
+# 00:1d.* USB Controller (#1)
+# 00:1e.0 PCI bridge (legacy PCI bridge)
+#
+# More information about these devices is available below.
+
+
+# Machine options
+# =========================================================
+#
+# We use the q35 machine type and enable KVM acceleration
+# for better performance.
+#
+# Using less than 1 GiB of memory is probably not going to
+# yield good performance in the guest, and might even lead
+# to obscure boot issues in some cases.
+#
+# Unfortunately, there is no way to configure the CPU model
+# in this file, so it will have to be provided on the
+# command line.
+
+[machine]
+ type = "q35"
+ accel = "kvm"
+
+[memory]
+ size = "1024"
+
+
+# PCI bridge (PCI Express Root Ports)
+# =========================================================
+#
+# We add four PCI Express Root Ports, all sharing the same
+# slot on the PCI Express Root Bus. These ports support
+# hotplug.
+
+[device "ich9-pcie-port-1"]
+ driver = "ioh3420"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1c.0"
+ port = "1"
+ chassis = "1"
+
+[device "ich9-pcie-port-2"]
+ driver = "ioh3420"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1c.1"
+ port = "2"
+ chassis = "2"
+
+[device "ich9-pcie-port-3"]
+ driver = "ioh3420"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1c.2"
+ port = "3"
+ chassis = "3"
+
+[device "ich9-pcie-port-4"]
+ driver = "ioh3420"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1c.3"
+ port = "4"
+ chassis = "4"
+
+
+# PCI bridge (legacy PCI bridge)
+# =========================================================
+#
+# This bridge can be used to build an independent topology
+# for legacy PCI devices. PCI Express devices should be
+# plugged into PCI Express slots instead, so ideally there
+# will be no devices connected to this bridge.
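+#
+# Purely as a hypothetical example, a legacy PCI device could
+# be connected to this bridge with something like
+#
+#   [device "legacy-sound"]
+#     driver = "AC97"
+#     bus = "ich9-pci-bridge"
+#     addr = "01.0"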
+
+[device "ich9-pci-bridge"]
+ driver = "i82801b11-bridge"
+ bus = "pcie.0"
+ addr = "1e.0"
+
+
+# SATA storage
+# =========================================================
+#
+# An implicit SATA controller is created automatically for
+# every single q35 guest; here we create a disk, backed by
+# a qcow2 disk image on the host's filesystem, and attach
+# it to that controller so that the guest can use it.
+#
+# We also create an optical disk, mostly for installation
+# purposes: once the guest OS has been successfully
+# installed, the guest will no longer boot from optical
+# media. If you don't want, or no longer want, to have an
+# optical disk in the guest you can safely comment out
+# all relevant sections below.
+
+[device "sata-disk"]
+ driver = "ide-hd"
+ bus = "ide.0"
+ drive = "disk"
+ bootindex = "1"
+
+[drive "disk"]
+ file = "guest.qcow2" # CHANGE ME
+ format = "qcow2"
+ if = "none"
+
+[device "sata-optical-disk"]
+ driver = "ide-cd"
+ bus = "ide.1"
+ drive = "optical-disk"
+ bootindex = "2"
+
+[drive "optical-disk"]
+ file = "install.iso" # CHANGE ME
+ format = "raw"
+ if = "none"
+
+
+# USB controller (#1)
+# =========================================================
+#
+# EHCI controller + UHCI companion controllers.
+
+[device "ich9-ehci-1"]
+ driver = "ich9-usb-ehci1"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1d.7"
+
+[device "ich9-uhci-1"]
+ driver = "ich9-usb-uhci1"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1d.0"
+ masterbus = "ich9-ehci-1.0"
+ firstport = "0"
+
+[device "ich9-uhci-2"]
+ driver = "ich9-usb-uhci2"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1d.1"
+ masterbus = "ich9-ehci-1.0"
+ firstport = "2"
+
+[device "ich9-uhci-3"]
+ driver = "ich9-usb-uhci3"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1d.2"
+ masterbus = "ich9-ehci-1.0"
+ firstport = "4"
+
+
+# USB controller (#2)
+# =========================================================
+#
+# EHCI controller + UHCI companion controllers.
+
+[device "ich9-ehci-2"]
+ driver = "ich9-usb-ehci2"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1a.7"
+
+[device "ich9-uhci-4"]
+ driver = "ich9-usb-uhci4"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1a.0"
+ masterbus = "ich9-ehci-2.0"
+ firstport = "0"
+
+[device "ich9-uhci-5"]
+ driver = "ich9-usb-uhci5"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1a.1"
+ masterbus = "ich9-ehci-2.0"
+ firstport = "2"
+
+[device "ich9-uhci-6"]
+ driver = "ich9-usb-uhci6"
+ multifunction = "on"
+ bus = "pcie.0"
+ addr = "1a.2"
+ masterbus = "ich9-ehci-2.0"
+ firstport = "4"
+
+
+# Ethernet controller
+# =========================================================
+#
+# We add a Gigabit Ethernet interface to the guest; on the
+# host side, we take advantage of user networking so that
+# the QEMU process doesn't require any additional
+# privileges.
+
+[netdev "hostnet"]
+ type = "user"
+
+[device "net"]
+ driver = "e1000"
+ netdev = "hostnet"
+ bus = "pcie.0"
+ addr = "19.0"
+
+
+# VGA compatible controller
+# =========================================================
+#
+# We use stdvga instead of Cirrus as it supports more video
+# modes and is closer to what actual hardware looks like.
+#
+# If you're running the guest on a remote, potentially
+# headless host, you will probably want to append something
+# like
+#
+# -display vnc=127.0.0.1:0
+#
+# to the command line in order to prevent QEMU from
+# creating a graphical display window on the host and
+# enable remote access instead.
+
+[device "video"]
+ driver = "VGA"
+ bus = "pcie.0"
+ addr = "01.0"
+
+
+# Audio device
+# =========================================================
+#
+# The sound card is a legacy PCI device that is plugged
+# directly into the PCI Express Root Bus.
+
+[device "ich9-hda-audio"]
+ driver = "ich9-intel-hda"
+ bus = "pcie.0"
+ addr = "1b.0"
+
+[device "ich9-hda-duplex"]
+ driver = "hda-duplex"
+ bus = "ich9-hda-audio.0"
+ cad = "0"
diff --git a/docs/q35-virtio-graphical.cfg b/docs/q35-virtio-graphical.cfg
new file mode 100644
index 0000000000..28bde2fc57
--- /dev/null
+++ b/docs/q35-virtio-graphical.cfg
@@ -0,0 +1,248 @@
+# q35 - VirtIO guest (graphical console)
+# =========================================================
+#
+# Usage:
+#
+# $ qemu-system-x86_64 \
+# -nodefaults \
+# -readconfig q35-virtio-graphical.cfg
+#
+# You will probably need to tweak the lines marked as
+# CHANGE ME before being able to use this configuration!
+#
+# The guest will have a selection of VirtIO devices
+# tailored towards optimal performance with modern guests,
+# and will be accessed through a graphical console.
+#
+# ---------------------------------------------------------
+#
+# Using -nodefaults is required to have full control over
+# the virtual hardware: when it's specified, QEMU will
+# populate the board with only the builtin peripherals
+# plus a small selection of core PCI devices and
+# controllers; the user will then have to explicitly add
+# further devices.
+#
+# The core PCI devices show up in the guest as:
+#
+# 00:00.0 Host bridge
+# 00:1f.0 ISA bridge / LPC
+# 00:1f.2 SATA (AHCI) controller
+# 00:1f.3 SMBus controller
+#
+# This configuration file adds a number of other useful
+# devices, more specifically:
+#
+# 00:01.0 VGA compatible controller
+# 00:1b.0 Audio device
+# 00:1c.* PCI bridge (PCI Express Root Ports)
+# 01:00.0 SCSI storage controller
+# 02:00.0 Ethernet controller
+# 03:00.0 USB controller
+#
+# More information about these devices is available below.
+
+
+# Machine options
+# =========================================================
+#
+# We use the q35 machine type and enable KVM acceleration
+# for better performance.
+#
+# Using less than 1 GiB of memory is probably not going to
+# yield good performance in the guest, and might even lead
+# to obscure boot issues in some cases.
+
+[machine]
+ type = "q35"
+ accel = "kvm"
+
+[memory]
+ size = "1024"
+
+
+# PCI bridge (PCI Express Root Ports)
+# =========================================================
+#
+# We create eight PCI Express Root Ports, and we plug them
+# all into separate functions of the same slot. Some of
+# them will be used by devices, the rest will remain
+# available for hotplug.
+
+[device "pcie.1"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.0"
+ port = "1"
+ chassis = "1"
+ multifunction = "on"
+
+[device "pcie.2"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.1"
+ port = "2"
+ chassis = "2"
+
+[device "pcie.3"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.2"
+ port = "3"
+ chassis = "3"
+
+[device "pcie.4"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.3"
+ port = "4"
+ chassis = "4"
+
+[device "pcie.5"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.4"
+ port = "5"
+ chassis = "5"
+
+[device "pcie.6"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.5"
+ port = "6"
+ chassis = "6"
+
+[device "pcie.7"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.6"
+ port = "7"
+ chassis = "7"
+
+[device "pcie.8"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.7"
+ port = "8"
+ chassis = "8"
+
+
+# SCSI storage controller (and storage)
+# =========================================================
+#
+# We use virtio-scsi here so that we can (hot)plug a large
+# number of disks without running into issues; a SCSI disk,
+# backed by a qcow2 disk image on the host's filesystem, is
+# attached to it.
+#
+# We also create an optical disk, mostly for installation
+# purposes: once the guest OS has been successfully
+# installed, the guest will no longer boot from optical
+# media. If you don't want, or no longer want, to have an
+# optical disk in the guest you can safely comment out
+# all relevant sections below.
+
+[device "scsi"]
+ driver = "virtio-scsi-pci"
+ bus = "pcie.1"
+ addr = "00.0"
+
+[device "scsi-disk"]
+ driver = "scsi-hd"
+ bus = "scsi.0"
+ drive = "disk"
+ bootindex = "1"
+
+[drive "disk"]
+ file = "guest.qcow2" # CHANGE ME
+ format = "qcow2"
+ if = "none"
+
+[device "scsi-optical-disk"]
+ driver = "scsi-cd"
+ bus = "scsi.0"
+ drive = "optical-disk"
+ bootindex = "2"
+
+[drive "optical-disk"]
+ file = "install.iso" # CHANGE ME
+ format = "raw"
+ if = "none"
+
+
+# Ethernet controller
+# =========================================================
+#
+# We use virtio-net for improved performance over emulated
+# hardware; on the host side, we take advantage of user
+# networking so that the QEMU process doesn't require any
+# additional privileges.
+
+[netdev "hostnet"]
+ type = "user"
+
+[device "net"]
+ driver = "virtio-net-pci"
+ netdev = "hostnet"
+ bus = "pcie.2"
+ addr = "00.0"
+
+
+# USB controller (and input devices)
+# =========================================================
+#
+# We add a virtualization-friendly USB 3.0 controller and
+# a USB tablet so that graphical guests can be controlled
+# appropriately. A USB keyboard is not needed, as q35
+# guests get a PS/2 one added automatically.
+
+[device "usb"]
+ driver = "nec-usb-xhci"
+ bus = "pcie.3"
+ addr = "00.0"
+
+[device "tablet"]
+ driver = "usb-tablet"
+ bus = "usb.0"
+
+
+# VGA compatible controller
+# =========================================================
+#
+# We plug the QXL video card directly into the PCI Express
+# Root Bus as it is a legacy PCI device; this way, we can
+# reduce the number of PCI Express controllers in the
+# guest.
+#
+# If you're running the guest on a remote, potentially
+# headless host, you will probably want to append something
+# like
+#
+# -display vnc=127.0.0.1:0
+#
+# to the command line in order to prevent QEMU from
+# creating a graphical display window on the host and
+# enable remote access instead.
+
+[device "video"]
+ driver = "qxl-vga"
+ bus = "pcie.0"
+ addr = "01.0"
+
+
+# Audio device
+# =========================================================
+#
+# Like the video card, the sound card is a legacy PCI
+# device and as such can be plugged directly into the PCI
+# Express Root Bus.
+
+[device "sound"]
+ driver = "ich9-intel-hda"
+ bus = "pcie.0"
+ addr = "1b.0"
+
+[device "duplex"]
+ driver = "hda-duplex"
+ bus = "sound.0"
+ cad = "0"
diff --git a/docs/q35-virtio-serial.cfg b/docs/q35-virtio-serial.cfg
new file mode 100644
index 0000000000..c33c9cc07a
--- /dev/null
+++ b/docs/q35-virtio-serial.cfg
@@ -0,0 +1,193 @@
+# q35 - VirtIO guest (serial console)
+# =========================================================
+#
+# Usage:
+#
+# $ qemu-system-x86_64 \
+# -nodefaults \
+# -readconfig q35-virtio-serial.cfg \
+# -display none -serial mon:stdio
+#
+# You will probably need to tweak the lines marked as
+# CHANGE ME before being able to use this configuration!
+#
+# The guest will have a selection of VirtIO devices
+# tailored towards optimal performance with modern guests,
+# and will be accessed through the serial console.
+#
+# ---------------------------------------------------------
+#
+# Using -nodefaults is required to have full control over
+# the virtual hardware: when it's specified, QEMU will
+# populate the board with only the builtin peripherals
+# plus a small selection of core PCI devices and
+# controllers; the user will then have to explicitly add
+# further devices.
+#
+# The core PCI devices show up in the guest as:
+#
+# 00:00.0 Host bridge
+# 00:1f.0 ISA bridge / LPC
+# 00:1f.2 SATA (AHCI) controller
+# 00:1f.3 SMBus controller
+#
+# This configuration file adds a number of other useful
+# devices, more specifically:
+#
+# 00:1c.* PCI bridge (PCI Express Root Ports)
+# 01:00.0 SCSI storage controller
+# 02:00.0 Ethernet controller
+#
+# More information about these devices is available below.
+#
+# We use '-display none' to prevent QEMU from creating a
+# graphical display window, which would serve no purpose in
+# this specific configuration, and '-serial mon:stdio' to
+# multiplex the guest's serial console and the QEMU monitor
+# to the host's stdio; use 'Ctrl+A h' to learn how to
+# switch between the two and more.
+
+
+# Machine options
+# =========================================================
+#
+# We use the q35 machine type and enable KVM acceleration
+# for better performance.
+#
+# Using less than 1 GiB of memory is probably not going to
+# yield good performance in the guest, and might even lead
+# to obscure boot issues in some cases.
+
+[machine]
+ type = "q35"
+ accel = "kvm"
+
+[memory]
+ size = "1024"
+
+
+# PCI bridge (PCI Express Root Ports)
+# =========================================================
+#
+# We create eight PCI Express Root Ports, and we plug them
+# all into separate functions of the same slot. Some of
+# them will be used by devices, the rest will remain
+# available for hotplug.
+
+[device "pcie.1"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.0"
+ port = "1"
+ chassis = "1"
+ multifunction = "on"
+
+[device "pcie.2"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.1"
+ port = "2"
+ chassis = "2"
+
+[device "pcie.3"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.2"
+ port = "3"
+ chassis = "3"
+
+[device "pcie.4"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.3"
+ port = "4"
+ chassis = "4"
+
+[device "pcie.5"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.4"
+ port = "5"
+ chassis = "5"
+
+[device "pcie.6"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.5"
+ port = "6"
+ chassis = "6"
+
+[device "pcie.7"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.6"
+ port = "7"
+ chassis = "7"
+
+[device "pcie.8"]
+ driver = "pcie-root-port"
+ bus = "pcie.0"
+ addr = "1c.7"
+ port = "8"
+ chassis = "8"
+
+
+# SCSI storage controller (and storage)
+# =========================================================
+#
+# We use virtio-scsi here so that we can (hot)plug a large
+# number of disks without running into issues; a SCSI disk,
+# backed by a qcow2 disk image on the host's filesystem, is
+# attached to it.
+#
+# We also create an optical disk, mostly for installation
+# purposes: once the guest OS has been successfully
+# installed, the guest will no longer boot from optical
+# media. If you don't want, or no longer want, to have an
+# optical disk in the guest you can safely comment out
+# all relevant sections below.
+
+[device "scsi"]
+ driver = "virtio-scsi-pci"
+ bus = "pcie.1"
+ addr = "00.0"
+
+[device "scsi-disk"]
+ driver = "scsi-hd"
+ bus = "scsi.0"
+ drive = "disk"
+ bootindex = "1"
+
+[drive "disk"]
+ file = "guest.qcow2" # CHANGE ME
+ format = "qcow2"
+ if = "none"
+
+[device "scsi-optical-disk"]
+ driver = "scsi-cd"
+ bus = "scsi.0"
+ drive = "optical-disk"
+ bootindex = "2"
+
+[drive "optical-disk"]
+ file = "install.iso" # CHANGE ME
+ format = "raw"
+ if = "none"
+
+
+# Ethernet controller
+# =========================================================
+#
+# We use virtio-net for improved performance over emulated
+# hardware; on the host side, we take advantage of user
+# networking so that the QEMU process doesn't require any
+# additional privileges.
+
+[netdev "hostnet"]
+ type = "user"
+
+[device "net"]
+ driver = "virtio-net-pci"
+ netdev = "hostnet"
+ bus = "pcie.2"
+ addr = "00.0"
diff --git a/docs/replay.txt b/docs/replay.txt
index 03e193193f..486c1e0e9d 100644
--- a/docs/replay.txt
+++ b/docs/replay.txt
@@ -225,3 +225,10 @@ recording the virtual machine this filter puts all packets coming from
the outer world into the log. In replay mode packets from the log are
injected into the network device. All interactions with network backend
in replay mode are disabled.
+
+Audio devices
+-------------
+
+Audio data is recorded and replayed automatically. The command line for recording
+and replaying must contain identical specifications of audio hardware, e.g.:
+ -soundhw ac97
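+
+As a rough illustration - the rrfile name and the trailing options are
+placeholders - a guest could be recorded and then replayed with:
+
+ qemu-system-i386 -icount shift=7,rr=record,rrfile=replay.bin -soundhw ac97 ...
+ qemu-system-i386 -icount shift=7,rr=replay,rrfile=replay.bin -soundhw ac97 ...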