-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  block.c | 2
-rw-r--r--  block/archipelago.c | 76
-rw-r--r--  block/vmdk.c | 4
-rw-r--r--  blockdev.c | 30
-rw-r--r--  cpu-exec.c | 17
-rw-r--r--  default-configs/ppc-softmmu.mak | 4
-rw-r--r--  default-configs/ppc64-softmmu.mak | 3
-rw-r--r--  exec.c | 61
-rw-r--r--  hw/arm/boot.c | 71
-rw-r--r--  hw/arm/virt.c | 76
-rw-r--r--  hw/block/pflash_cfi01.c | 18
-rw-r--r--  hw/core/loader.c | 21
-rw-r--r--  hw/display/qxl-render.c | 7
-rw-r--r--  hw/display/vga.c | 12
-rw-r--r--  hw/display/vmware_vga.c | 6
-rw-r--r--  hw/display/xenfb.c | 8
-rw-r--r--  hw/gpio/pl061.c | 59
-rw-r--r--  hw/ide/core.c | 111
-rw-r--r--  hw/misc/macio/cuda.c | 23
-rw-r--r--  hw/misc/macio/macio.c | 19
-rw-r--r--  hw/net/spapr_llan.c | 13
-rw-r--r--  hw/nvram/mac_nvram.c | 70
-rw-r--r--  hw/pci-host/apb.c | 15
-rw-r--r--  hw/ppc/mac.h | 4
-rw-r--r--  hw/ppc/mac_newworld.c | 32
-rw-r--r--  hw/ppc/mac_oldworld.c | 20
-rw-r--r--  hw/ppc/spapr.c | 188
-rw-r--r--  hw/ppc/spapr_hcall.c | 22
-rw-r--r--  hw/ppc/spapr_pci.c | 61
-rw-r--r--  hw/ppc/spapr_rtas.c | 15
-rw-r--r--  hw/usb/hcd-xhci.c | 6
-rw-r--r--  include/exec/exec-all.h | 4
-rw-r--r--  include/hw/i386/pc.h | 5
-rw-r--r--  include/hw/loader.h | 1
-rw-r--r--  include/hw/pci-host/spapr.h | 2
-rw-r--r--  include/hw/ppc/ppc.h | 2
-rw-r--r--  include/hw/ppc/spapr.h | 8
-rw-r--r--  include/qapi/util.h | 17
-rw-r--r--  include/qom/cpu.h | 10
-rw-r--r--  include/sysemu/kvm.h | 2
-rw-r--r--  include/ui/console.h | 25
-rw-r--r--  include/ui/qemu-pixman.h | 4
-rw-r--r--  kvm-all.c | 13
-rw-r--r--  linux-user/main.c | 3
-rw-r--r--  linux-user/signal.c | 184
-rw-r--r--  qapi/Makefile.objs | 2
-rw-r--r--  qapi/qapi-util.c | 34
-rw-r--r--  qemu-img.c | 3
-rw-r--r--  qemu-img.texi | 7
-rw-r--r--  qemu-nbd.c | 100
-rw-r--r--  qom/cpu.c | 5
-rw-r--r--  target-arm/cpu.c | 37
-rw-r--r--  target-arm/cpu.h | 2
-rw-r--r--  target-arm/helper.c | 397
-rw-r--r--  target-arm/internals.h | 30
-rw-r--r--  target-arm/machine.c | 3
-rw-r--r--  target-arm/op_helper.c | 188
-rw-r--r--  target-i386/cpu.c | 6
-rw-r--r--  target-i386/cpu.h | 2
-rw-r--r--  target-i386/helper.c | 5
-rw-r--r--  target-lm32/cpu.c | 2
-rw-r--r--  target-lm32/cpu.h | 2
-rw-r--r--  target-lm32/helper.c | 5
-rw-r--r--  target-ppc/helper.h | 1
-rw-r--r--  target-ppc/int_helper.c | 19
-rw-r--r--  target-ppc/kvm.c | 371
-rw-r--r--  target-ppc/kvm_ppc.h | 12
-rw-r--r--  target-ppc/translate.c | 123
-rw-r--r--  target-xtensa/cpu.c | 2
-rw-r--r--  target-xtensa/cpu.h | 2
-rw-r--r--  target-xtensa/helper.c | 5
-rw-r--r--  tests/Makefile | 3
-rw-r--r--  tests/ide-test.c | 2
-rw-r--r--  tests/libqos/malloc-pc.c | 280
-rw-r--r--  tests/libqos/malloc-pc.h | 9
-rw-r--r--  tests/libqos/pci.c | 111
-rw-r--r--  tests/libqos/pci.h | 10
-rw-r--r--  tests/libqos/virtio-pci.c | 343
-rw-r--r--  tests/libqos/virtio-pci.h | 61
-rw-r--r--  tests/libqos/virtio.c | 257
-rw-r--r--  tests/libqos/virtio.h | 182
-rw-r--r--  tests/libqtest.c | 48
-rw-r--r--  tests/libqtest.h | 7
-rw-r--r--  tests/virtio-blk-test.c | 640
-rw-r--r--  trace-events | 2
-rw-r--r--  ui/console.c | 253
-rw-r--r--  ui/qemu-pixman.c | 90
-rw-r--r--  ui/sdl.c | 5
-rw-r--r--  ui/vnc-enc-tight.c | 12
90 files changed, 4236 insertions, 800 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 8622e019fa..206bf7ea45 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -980,7 +980,7 @@ S: Supported
F: block/rbd.c
Sheepdog
-M: MORITA Kazutaka <morita.kazutaka@lab.ntt.co.jp>
+M: Hitoshi Mitake <mitake.hitoshi@lab.ntt.co.jp>
M: Liu Yuan <namei.unix@gmail.com>
L: sheepdog@lists.wpkg.org
S: Supported
diff --git a/block.c b/block.c
index cb670fd54d..d06dd51632 100644
--- a/block.c
+++ b/block.c
@@ -2246,7 +2246,7 @@ int bdrv_commit(BlockDriverState *bs)
if (!drv)
return -ENOMEDIUM;
-
+
if (!bs->backing_hd) {
return -ENOTSUP;
}
diff --git a/block/archipelago.c b/block/archipelago.c
index 34f72dc5a5..22a7daaa41 100644
--- a/block/archipelago.c
+++ b/block/archipelago.c
@@ -57,6 +57,7 @@
#include "qapi/qmp/qint.h"
#include "qapi/qmp/qstring.h"
#include "qapi/qmp/qjson.h"
+#include "qemu/atomic.h"
#include <inttypes.h>
#include <xseg/xseg.h>
@@ -214,7 +215,7 @@ static void xseg_request_handler(void *state)
xseg_put_request(s->xseg, req, s->srcport);
- if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) {
+ if (atomic_fetch_dec(&segreq->ref) == 1) {
if (!segreq->failed) {
reqdata->aio_cb->ret = segreq->count;
archipelago_finish_aiocb(reqdata);
@@ -233,7 +234,7 @@ static void xseg_request_handler(void *state)
segreq->count += req->serviced;
xseg_put_request(s->xseg, req, s->srcport);
- if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) {
+ if (atomic_fetch_dec(&segreq->ref) == 1) {
if (!segreq->failed) {
reqdata->aio_cb->ret = segreq->count;
archipelago_finish_aiocb(reqdata);
@@ -824,78 +825,47 @@ static int archipelago_aio_segmented_rw(BDRVArchipelagoState *s,
ArchipelagoAIOCB *aio_cb,
int op)
{
- int i, ret, segments_nr, last_segment_size;
+ int ret, segments_nr;
+ size_t pos = 0;
ArchipelagoSegmentedRequest *segreq;
- segreq = g_new(ArchipelagoSegmentedRequest, 1);
+ segreq = g_new0(ArchipelagoSegmentedRequest, 1);
if (op == ARCHIP_OP_FLUSH) {
segments_nr = 1;
- segreq->ref = segments_nr;
- segreq->total = count;
- segreq->count = 0;
- segreq->failed = 0;
- ret = archipelago_submit_request(s, 0, count, offset, aio_cb,
- segreq, ARCHIP_OP_FLUSH);
- if (ret < 0) {
- goto err_exit;
- }
- return 0;
+ } else {
+ segments_nr = (int)(count / MAX_REQUEST_SIZE) + \
+ ((count % MAX_REQUEST_SIZE) ? 1 : 0);
}
-
- segments_nr = (int)(count / MAX_REQUEST_SIZE) + \
- ((count % MAX_REQUEST_SIZE) ? 1 : 0);
- last_segment_size = (int)(count % MAX_REQUEST_SIZE);
-
- segreq->ref = segments_nr;
segreq->total = count;
- segreq->count = 0;
- segreq->failed = 0;
+ atomic_mb_set(&segreq->ref, segments_nr);
- for (i = 0; i < segments_nr - 1; i++) {
- ret = archipelago_submit_request(s, i * MAX_REQUEST_SIZE,
- MAX_REQUEST_SIZE,
- offset + i * MAX_REQUEST_SIZE,
- aio_cb, segreq, op);
+ while (segments_nr > 1) {
+ ret = archipelago_submit_request(s, pos,
+ MAX_REQUEST_SIZE,
+ offset + pos,
+ aio_cb, segreq, op);
if (ret < 0) {
goto err_exit;
}
+ count -= MAX_REQUEST_SIZE;
+ pos += MAX_REQUEST_SIZE;
+ segments_nr--;
}
-
- if ((segments_nr > 1) && last_segment_size) {
- ret = archipelago_submit_request(s, i * MAX_REQUEST_SIZE,
- last_segment_size,
- offset + i * MAX_REQUEST_SIZE,
- aio_cb, segreq, op);
- } else if ((segments_nr > 1) && !last_segment_size) {
- ret = archipelago_submit_request(s, i * MAX_REQUEST_SIZE,
- MAX_REQUEST_SIZE,
- offset + i * MAX_REQUEST_SIZE,
- aio_cb, segreq, op);
- } else if (segments_nr == 1) {
- ret = archipelago_submit_request(s, 0, count, offset, aio_cb,
- segreq, op);
- }
+ ret = archipelago_submit_request(s, pos, count, offset + pos,
+ aio_cb, segreq, op);
if (ret < 0) {
goto err_exit;
}
-
return 0;
err_exit:
- __sync_add_and_fetch(&segreq->failed, 1);
- if (segments_nr == 1) {
- if (__sync_add_and_fetch(&segreq->ref, -1) == 0) {
- g_free(segreq);
- }
- } else {
- if ((__sync_add_and_fetch(&segreq->ref, -segments_nr + i)) == 0) {
- g_free(segreq);
- }
+ segreq->failed = 1;
+ if (atomic_fetch_sub(&segreq->ref, segments_nr) == segments_nr) {
+ g_free(segreq);
}
-
return ret;
}
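
The refcount conversion above hinges on the different return conventions of the two primitives: __sync_add_and_fetch() returns the new value, so the old code tested for 0, while qemu/atomic.h's atomic_fetch_dec() returns the value before the decrement, so the equivalent "last reference dropped" test is == 1. A minimal standalone sketch of that equivalence, using C11 atomics purely as a stand-in for the QEMU helpers (illustrative only, not QEMU code):

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
    atomic_int ref = 1;

    /* old style: __sync_add_and_fetch(&ref, -1) yields the NEW value */
    int new_val = atomic_fetch_sub(&ref, 1) - 1;
    assert(new_val == 0);            /* last reference: new value is 0 */

    /* new style: atomic_fetch_dec(&ref) yields the OLD value */
    atomic_store(&ref, 1);
    int old_val = atomic_fetch_sub(&ref, 1);
    assert(old_val == 1);            /* same condition, tested on the old value */

    return 0;
}
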
diff --git a/block/vmdk.c b/block/vmdk.c
index 07cb62ceb7..a1cb91131e 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -834,6 +834,7 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
ret = vmdk_add_extent(bs, extent_file, true, sectors,
0, 0, 0, 0, 0, &extent, errp);
if (ret < 0) {
+ bdrv_unref(extent_file);
return ret;
}
extent->flat_start_offset = flat_offset << 9;
@@ -845,14 +846,15 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
} else {
ret = vmdk_open_sparse(bs, extent_file, bs->open_flags, buf, errp);
}
+ g_free(buf);
if (ret) {
- g_free(buf);
bdrv_unref(extent_file);
return ret;
}
extent = &s->extents[s->num_extents - 1];
} else {
error_setg(errp, "Unsupported extent type '%s'", type);
+ bdrv_unref(extent_file);
return -ENOTSUP;
}
extent->type = g_strdup(type);
diff --git a/blockdev.c b/blockdev.c
index e37b068e9e..e919566c16 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -39,6 +39,7 @@
#include "qapi/qmp/types.h"
#include "qapi-visit.h"
#include "qapi/qmp-output-visitor.h"
+#include "qapi/util.h"
#include "sysemu/sysemu.h"
#include "block/block_int.h"
#include "qmp-commands.h"
@@ -274,25 +275,6 @@ static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
}
}
-static inline int parse_enum_option(const char *lookup[], const char *buf,
- int max, int def, Error **errp)
-{
- int i;
-
- if (!buf) {
- return def;
- }
-
- for (i = 0; i < max; i++) {
- if (!strcmp(buf, lookup[i])) {
- return i;
- }
- }
-
- error_setg(errp, "invalid parameter value: %s", buf);
- return def;
-}
-
static bool check_throttle_config(ThrottleConfig *cfg, Error **errp)
{
if (throttle_conflicting(cfg)) {
@@ -456,11 +438,11 @@ static DriveInfo *blockdev_init(const char *file, QDict *bs_opts,
}
detect_zeroes =
- parse_enum_option(BlockdevDetectZeroesOptions_lookup,
- qemu_opt_get(opts, "detect-zeroes"),
- BLOCKDEV_DETECT_ZEROES_OPTIONS_MAX,
- BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
- &error);
+ qapi_enum_parse(BlockdevDetectZeroesOptions_lookup,
+ qemu_opt_get(opts, "detect-zeroes"),
+ BLOCKDEV_DETECT_ZEROES_OPTIONS_MAX,
+ BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
+ &error);
if (error) {
error_propagate(errp, error);
goto early_err;
diff --git a/cpu-exec.c b/cpu-exec.c
index 7b5d2e21d0..bd93165209 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -295,16 +295,10 @@ static inline TranslationBlock *tb_find_fast(CPUArchState *env)
return tb;
}
-static CPUDebugExcpHandler *debug_excp_handler;
-
-void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
-{
- debug_excp_handler = handler;
-}
-
static void cpu_handle_debug_exception(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
if (!cpu->watchpoint_hit) {
@@ -312,9 +306,8 @@ static void cpu_handle_debug_exception(CPUArchState *env)
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
- if (debug_excp_handler) {
- debug_excp_handler(env);
- }
+
+ cc->debug_excp_handler(cpu);
}
/* main execution loop */
@@ -618,8 +611,8 @@ int cpu_exec(CPUArchState *env)
We avoid this by disabling interrupts when
pc contains a magic address. */
if (interrupt_request & CPU_INTERRUPT_HARD
- && ((IS_M(env) && env->regs[15] < 0xfffffff0)
- || !(env->daif & PSTATE_I))) {
+ && !(env->daif & PSTATE_I)
+ && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
cpu->exception_index = EXCP_IRQ;
cc->do_interrupt(cpu);
next_tb = 0;
diff --git a/default-configs/ppc-softmmu.mak b/default-configs/ppc-softmmu.mak
index 33f8d84e68..d725b23312 100644
--- a/default-configs/ppc-softmmu.mak
+++ b/default-configs/ppc-softmmu.mak
@@ -45,8 +45,8 @@ CONFIG_PREP=y
CONFIG_MAC=y
CONFIG_E500=y
CONFIG_OPENPIC_KVM=$(and $(CONFIG_E500),$(CONFIG_KVM))
+CONFIG_ETSEC=y
+CONFIG_LIBDECNUMBER=y
# For PReP
CONFIG_MC146818RTC=y
-CONFIG_ETSEC=y
CONFIG_ISA_TESTDEV=y
-CONFIG_LIBDECNUMBER=y
diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak
index 37a15b7ce2..bd30d69347 100644
--- a/default-configs/ppc64-softmmu.mak
+++ b/default-configs/ppc64-softmmu.mak
@@ -46,6 +46,8 @@ CONFIG_PREP=y
CONFIG_MAC=y
CONFIG_E500=y
CONFIG_OPENPIC_KVM=$(and $(CONFIG_E500),$(CONFIG_KVM))
+CONFIG_ETSEC=y
+CONFIG_LIBDECNUMBER=y
# For pSeries
CONFIG_XICS=$(CONFIG_PSERIES)
CONFIG_XICS_KVM=$(and $(CONFIG_PSERIES),$(CONFIG_KVM))
@@ -58,4 +60,3 @@ CONFIG_I82374=y
CONFIG_I8257=y
CONFIG_MC146818RTC=y
CONFIG_ISA_TESTDEV=y
-CONFIG_LIBDECNUMBER=y
diff --git a/exec.c b/exec.c
index dd1e57660a..5aa84d4ae3 100644
--- a/exec.c
+++ b/exec.c
@@ -572,6 +572,16 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
+int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
+ int flags)
+{
+ return -ENOSYS;
+}
+
+void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
+{
+}
+
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
@@ -582,12 +592,10 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
- vaddr len_mask = ~(len - 1);
CPUWatchpoint *wp;
- /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
- if ((len & (len - 1)) || (addr & ~len_mask) ||
- len == 0 || len > TARGET_PAGE_SIZE) {
+ /* forbid ranges which are empty or run off the end of the address space */
+ if (len == 0 || (addr + len - 1) <= addr) {
error_report("tried to set invalid watchpoint at %"
VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
return -EINVAL;
@@ -595,7 +603,7 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
wp = g_malloc(sizeof(*wp));
wp->vaddr = addr;
- wp->len_mask = len_mask;
+ wp->len = len;
wp->flags = flags;
/* keep all GDB-injected watchpoints in front */
@@ -616,11 +624,10 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
int flags)
{
- vaddr len_mask = ~(len - 1);
CPUWatchpoint *wp;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (addr == wp->vaddr && len_mask == wp->len_mask
+ if (addr == wp->vaddr && len == wp->len
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(cpu, wp);
return 0;
@@ -650,6 +657,27 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
}
}
}
+
+/* Return true if this watchpoint address matches the specified
+ * access (ie the address range covered by the watchpoint overlaps
+ * partially or completely with the address range covered by the
+ * access).
+ */
+static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
+ vaddr addr,
+ vaddr len)
+{
+ /* We know the lengths are non-zero, but a little caution is
+ * required to avoid errors in the case where the range ends
+ * exactly at the top of the address space and so addr + len
+ * wraps round to zero.
+ */
+ vaddr wpend = wp->vaddr + wp->len - 1;
+ vaddr addrend = addr + len - 1;
+
+ return !(addr > wpend || wp->vaddr > addrend);
+}
+
#endif
/* Add a breakpoint. */
@@ -861,7 +889,7 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
+ if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
iotlb = PHYS_SECTION_WATCH + paddr;
@@ -1649,7 +1677,7 @@ static const MemoryRegionOps notdirty_mem_ops = {
};
/* Generate a debug exception if a watchpoint has been hit. */
-static void check_watchpoint(int offset, int len_mask, int flags)
+static void check_watchpoint(int offset, int len, int flags)
{
CPUState *cpu = current_cpu;
CPUArchState *env = cpu->env_ptr;
@@ -1667,9 +1695,14 @@ static void check_watchpoint(int offset, int len_mask, int flags)
}
vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if ((vaddr == (wp->vaddr & len_mask) ||
- (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
- wp->flags |= BP_WATCHPOINT_HIT;
+ if (cpu_watchpoint_address_matches(wp, vaddr, len)
+ && (wp->flags & flags)) {
+ if (flags == BP_MEM_READ) {
+ wp->flags |= BP_WATCHPOINT_HIT_READ;
+ } else {
+ wp->flags |= BP_WATCHPOINT_HIT_WRITE;
+ }
+ wp->hitaddr = vaddr;
if (!cpu->watchpoint_hit) {
cpu->watchpoint_hit = wp;
tb_check_watchpoint(cpu);
@@ -1694,7 +1727,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
unsigned size)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
switch (size) {
case 1: return ldub_phys(&address_space_memory, addr);
case 2: return lduw_phys(&address_space_memory, addr);
@@ -1706,7 +1739,7 @@ static uint64_t watch_mem_read(void *opaque, hwaddr addr,
static void watch_mem_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
switch (size) {
case 1:
stb_phys(&address_space_memory, addr, val);
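
The new cpu_watchpoint_address_matches() helper above replaces the old power-of-two length-mask scheme with a plain range-overlap test, computing inclusive end addresses so a watchpoint that finishes exactly at the top of the address space does not wrap. A small self-contained sketch of the same test (illustrative only, not QEMU code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Two ranges [a, a+alen-1] and [b, b+blen-1] overlap iff neither
 * starts beyond the other's inclusive end. */
static bool ranges_overlap(uint64_t a, uint64_t alen, uint64_t b, uint64_t blen)
{
    uint64_t aend = a + alen - 1;   /* inclusive end, cannot wrap */
    uint64_t bend = b + blen - 1;

    return !(b > aend || a > bend);
}

int main(void)
{
    /* 4-byte watchpoint at 0x1000 vs a 2-byte access at 0x1002: hit */
    assert(ranges_overlap(0x1000, 4, 0x1002, 2));
    /* same watchpoint vs an access just past it: no hit */
    assert(!ranges_overlap(0x1000, 4, 0x1004, 1));
    /* a range ending at the very top of the address space does not wrap */
    assert(ranges_overlap(UINT64_MAX - 3, 4, UINT64_MAX, 1));
    return 0;
}
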
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index e32f2f4158..c8dc34f086 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -312,7 +312,26 @@ static void set_kernel_args_old(const struct arm_boot_info *info)
}
}
-static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo)
+/**
+ * load_dtb() - load a device tree binary image into memory
+ * @addr: the address to load the image at
+ * @binfo: struct describing the boot environment
+ * @addr_limit: upper limit of the available memory area at @addr
+ *
+ * Load a device tree supplied by the machine or by the user with the
+ * '-dtb' command line option, and put it at offset @addr in target
+ * memory.
+ *
+ * If @addr_limit contains a meaningful value (i.e., it is strictly greater
+ * than @addr), the device tree is only loaded if its size does not exceed
+ * the limit.
+ *
+ * Returns: the size of the device tree image on success,
+ * 0 if the image size exceeds the limit,
+ * -1 on errors.
+ */
+static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
+ hwaddr addr_limit)
{
void *fdt = NULL;
int size, rc;
@@ -341,6 +360,15 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo)
}
}
+ if (addr_limit > addr && size > (addr_limit - addr)) {
+ /* Installing the device tree blob at addr would exceed addr_limit.
+ * Whether this constitutes failure is up to the caller to decide,
+ * so just return 0 as size, i.e., no error.
+ */
+ g_free(fdt);
+ return 0;
+ }
+
acells = qemu_fdt_getprop_cell(fdt, "/", "#address-cells");
scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells");
if (acells == 0 || scells == 0) {
@@ -396,11 +424,14 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo)
qemu_fdt_dumpdtb(fdt, size);
- cpu_physical_memory_write(addr, fdt, size);
+ /* Put the DTB into the memory map as a ROM image: this will ensure
+ * the DTB is copied again upon reset, even if addr points into RAM.
+ */
+ rom_add_blob_fixed("dtb", fdt, size, addr);
g_free(fdt);
- return 0;
+ return size;
fail:
g_free(fdt);
@@ -451,7 +482,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
int kernel_size;
int initrd_size;
int is_linux = 0;
- uint64_t elf_entry;
+ uint64_t elf_entry, elf_low_addr, elf_high_addr;
int elf_machine;
hwaddr entry, kernel_load_offset;
int big_endian;
@@ -459,6 +490,16 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
/* Load the kernel. */
if (!info->kernel_filename) {
+
+ if (have_dtb(info)) {
+ /* If we have a device tree blob, but no kernel to supply it to,
+ * copy it to the base of RAM for a bootloader to pick up.
+ */
+ if (load_dtb(info->loader_start, info, 0) < 0) {
+ exit(1);
+ }
+ }
+
/* If no kernel specified, do nothing; we will start from address 0
* (typically a boot ROM image) in the same way as hardware.
*/
@@ -508,7 +549,25 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
/* Assume that raw images are linux kernels, and ELF images are not. */
kernel_size = load_elf(info->kernel_filename, NULL, NULL, &elf_entry,
- NULL, NULL, big_endian, elf_machine, 1);
+ &elf_low_addr, &elf_high_addr, big_endian,
+ elf_machine, 1);
+ if (kernel_size > 0 && have_dtb(info)) {
+ /* If there is still some room left at the base of RAM, try and put
+ * the DTB there like we do for images loaded with -bios or -pflash.
+ */
+ if (elf_low_addr > info->loader_start
+ || elf_high_addr < info->loader_start) {
+ /* Pass elf_low_addr as address limit to load_dtb if it may be
+ * pointing into RAM, otherwise pass '0' (no limit)
+ */
+ if (elf_low_addr < info->loader_start) {
+ elf_low_addr = 0;
+ }
+ if (load_dtb(info->loader_start, info, elf_low_addr) < 0) {
+ exit(1);
+ }
+ }
+ }
entry = elf_entry;
if (kernel_size < 0) {
kernel_size = load_uimage(info->kernel_filename, &entry, NULL,
@@ -569,7 +628,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
*/
hwaddr dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size,
4096);
- if (load_dtb(dtb_start, info)) {
+ if (load_dtb(dtb_start, info, 0) < 0) {
exit(1);
}
fixupcontext[FIXUP_ARGPTR] = dtb_start;
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 89b4ed5281..8c6b171414 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -37,6 +37,7 @@
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/boards.h"
+#include "hw/loader.h"
#include "exec/address-spaces.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
@@ -371,11 +372,13 @@ static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
2, base, 2, size);
qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
- GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
qemu_fdt_setprop_cells(vbi->fdt, nodename, "clocks",
vbi->clock_phandle, vbi->clock_phandle);
qemu_fdt_setprop(vbi->fdt, nodename, "clock-names",
clocknames, sizeof(clocknames));
+
+ qemu_fdt_setprop_string(vbi->fdt, "/chosen", "linux,stdout-path", nodename);
g_free(nodename);
}
@@ -396,7 +399,7 @@ static void create_rtc(const VirtBoardInfo *vbi, qemu_irq *pic)
2, base, 2, size);
qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
- GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
qemu_fdt_setprop_cell(vbi->fdt, nodename, "clocks", vbi->clock_phandle);
qemu_fdt_setprop_string(vbi->fdt, nodename, "clock-names", "apb_pclk");
g_free(nodename);
@@ -437,6 +440,73 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
}
}
+static void create_one_flash(const char *name, hwaddr flashbase,
+ hwaddr flashsize)
+{
+ /* Create and map a single flash device. We use the same
+ * parameters as the flash devices on the Versatile Express board.
+ */
+ DriveInfo *dinfo = drive_get_next(IF_PFLASH);
+ DeviceState *dev = qdev_create(NULL, "cfi.pflash01");
+ const uint64_t sectorlength = 256 * 1024;
+
+ if (dinfo && qdev_prop_set_drive(dev, "drive", dinfo->bdrv)) {
+ abort();
+ }
+
+ qdev_prop_set_uint32(dev, "num-blocks", flashsize / sectorlength);
+ qdev_prop_set_uint64(dev, "sector-length", sectorlength);
+ qdev_prop_set_uint8(dev, "width", 4);
+ qdev_prop_set_uint8(dev, "device-width", 2);
+ qdev_prop_set_uint8(dev, "big-endian", 0);
+ qdev_prop_set_uint16(dev, "id0", 0x89);
+ qdev_prop_set_uint16(dev, "id1", 0x18);
+ qdev_prop_set_uint16(dev, "id2", 0x00);
+ qdev_prop_set_uint16(dev, "id3", 0x00);
+ qdev_prop_set_string(dev, "name", name);
+ qdev_init_nofail(dev);
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, flashbase);
+}
+
+static void create_flash(const VirtBoardInfo *vbi)
+{
+ /* Create two flash devices to fill the VIRT_FLASH space in the memmap.
+ * Any file passed via -bios goes in the first of these.
+ */
+ hwaddr flashsize = vbi->memmap[VIRT_FLASH].size / 2;
+ hwaddr flashbase = vbi->memmap[VIRT_FLASH].base;
+ char *nodename;
+
+ if (bios_name) {
+ const char *fn;
+
+ if (drive_get(IF_PFLASH, 0, 0)) {
+ error_report("The contents of the first flash device may be "
+ "specified with -bios or with -drive if=pflash... "
+ "but you cannot use both options at once");
+ exit(1);
+ }
+ fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (!fn || load_image_targphys(fn, flashbase, flashsize) < 0) {
+ error_report("Could not load ROM image '%s'", bios_name);
+ exit(1);
+ }
+ }
+
+ create_one_flash("virt.flash0", flashbase, flashsize);
+ create_one_flash("virt.flash1", flashbase + flashsize, flashsize);
+
+ nodename = g_strdup_printf("/flash@%" PRIx64, flashbase);
+ qemu_fdt_add_subnode(vbi->fdt, nodename);
+ qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash");
+ qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ 2, flashbase, 2, flashsize,
+ 2, flashbase + flashsize, 2, flashsize);
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4);
+ g_free(nodename);
+}
+
static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
@@ -515,6 +585,8 @@ static void machvirt_init(MachineState *machine)
vmstate_register_ram_global(ram);
memory_region_add_subregion(sysmem, vbi->memmap[VIRT_MEM].base, ram);
+ create_flash(vbi);
+
create_gic(vbi, pic);
create_uart(vbi, pic);
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index e2e0462963..1346541c80 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -94,10 +94,13 @@ struct pflash_t {
void *storage;
};
+static int pflash_post_load(void *opaque, int version_id);
+
static const VMStateDescription vmstate_pflash = {
.name = "pflash_cfi01",
.version_id = 1,
.minimum_version_id = 1,
+ .post_load = pflash_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT8(wcycle, pflash_t),
VMSTATE_UINT8(cmd, pflash_t),
@@ -209,11 +212,11 @@ static uint32_t pflash_devid_query(pflash_t *pfl, hwaddr offset)
switch (boff & 0xFF) {
case 0:
resp = pfl->ident0;
- DPRINTF("%s: Manufacturer Code %04x\n", __func__, ret);
+ DPRINTF("%s: Manufacturer Code %04x\n", __func__, resp);
break;
case 1:
resp = pfl->ident1;
- DPRINTF("%s: Device ID Code %04x\n", __func__, ret);
+ DPRINTF("%s: Device ID Code %04x\n", __func__, resp);
break;
default:
DPRINTF("%s: Read Device Information offset=%x\n", __func__,
@@ -988,3 +991,14 @@ MemoryRegion *pflash_cfi01_get_memory(pflash_t *fl)
{
return &fl->mem;
}
+
+static int pflash_post_load(void *opaque, int version_id)
+{
+ pflash_t *pfl = opaque;
+
+ if (!pfl->ro) {
+ DPRINTF("%s: updating bdrv for %s\n", __func__, pfl->name);
+ pflash_update(pfl, 0, pfl->sector_len * pfl->nb_blocs);
+ }
+ return 0;
+}
diff --git a/hw/core/loader.c b/hw/core/loader.c
index 87287b6510..5f3a8598c5 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -89,6 +89,27 @@ int load_image(const char *filename, uint8_t *addr)
return size;
}
+/* return the size or -1 if error */
+ssize_t load_image_size(const char *filename, void *addr, size_t size)
+{
+ int fd;
+ ssize_t actsize;
+
+ fd = open(filename, O_RDONLY | O_BINARY);
+ if (fd < 0) {
+ return -1;
+ }
+
+ actsize = read(fd, addr, size);
+ if (actsize < 0) {
+ close(fd);
+ return -1;
+ }
+ close(fd);
+
+ return actsize;
+}
+
/* read()-like version */
ssize_t read_targphys(const char *name,
int fd, hwaddr dst_addr, size_t nbytes)
diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
index bcc5c3701a..e812ddd6e7 100644
--- a/hw/display/qxl-render.c
+++ b/hw/display/qxl-render.c
@@ -116,13 +116,14 @@ static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl)
qxl->guest_primary.bytes_pp,
qxl->guest_primary.bits_pp);
if (qxl->guest_primary.qxl_stride > 0) {
+ pixman_format_code_t format =
+ qemu_default_pixman_format(qxl->guest_primary.bits_pp, true);
surface = qemu_create_displaysurface_from
(qxl->guest_primary.surface.width,
qxl->guest_primary.surface.height,
- qxl->guest_primary.bits_pp,
+ format,
qxl->guest_primary.abs_stride,
- qxl->guest_primary.data,
- false);
+ qxl->guest_primary.data);
} else {
surface = qemu_create_displaysurface
(qxl->guest_primary.surface.width,
diff --git a/hw/display/vga.c b/hw/display/vga.c
index d5f5a013c7..df0c010823 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -1725,9 +1725,11 @@ static void vga_draw_graphic(VGACommonState *s, int full_update)
height != s->last_height ||
s->last_depth != depth) {
if (depth == 32 || (depth == 16 && !byteswap)) {
+ pixman_format_code_t format =
+ qemu_default_pixman_format(depth, !byteswap);
surface = qemu_create_displaysurface_from(disp_width,
- height, depth, s->line_offset,
- s->vram_ptr + (s->start_addr * 4), byteswap);
+ height, format, s->line_offset,
+ s->vram_ptr + (s->start_addr * 4));
dpy_gfx_replace_surface(s->con, surface);
} else {
qemu_console_resize(s->con, disp_width, height);
@@ -1743,9 +1745,11 @@ static void vga_draw_graphic(VGACommonState *s, int full_update)
} else if (is_buffer_shared(surface) &&
(full_update || surface_data(surface) != s->vram_ptr
+ (s->start_addr * 4))) {
+ pixman_format_code_t format =
+ qemu_default_pixman_format(depth, !byteswap);
surface = qemu_create_displaysurface_from(disp_width,
- height, depth, s->line_offset,
- s->vram_ptr + (s->start_addr * 4), byteswap);
+ height, format, s->line_offset,
+ s->vram_ptr + (s->start_addr * 4));
dpy_gfx_replace_surface(s->con, surface);
}
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index 32026e4127..0c36c7204f 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -1052,10 +1052,12 @@ static inline void vmsvga_check_size(struct vmsvga_state_s *s)
s->new_height != surface_height(surface) ||
s->new_depth != surface_bits_per_pixel(surface)) {
int stride = (s->new_depth * s->new_width) / 8;
+ pixman_format_code_t format =
+ qemu_default_pixman_format(s->new_depth, true);
trace_vmware_setmode(s->new_width, s->new_height, s->new_depth);
surface = qemu_create_displaysurface_from(s->new_width, s->new_height,
- s->new_depth, stride,
- s->vga.vram_ptr, false);
+ format, stride,
+ s->vga.vram_ptr);
dpy_gfx_replace_surface(s->vga.con, surface);
s->invalidated = 1;
}
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 07ddc9deba..8a61e959a6 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -713,15 +713,17 @@ static void xenfb_update(void *opaque)
/* resize if needed */
if (xenfb->do_resize) {
+ pixman_format_code_t format;
+
xenfb->do_resize = 0;
switch (xenfb->depth) {
case 16:
case 32:
/* console.c supported depth -> buffer can be used directly */
+ format = qemu_default_pixman_format(xenfb->depth, true);
surface = qemu_create_displaysurface_from
- (xenfb->width, xenfb->height, xenfb->depth,
- xenfb->row_stride, xenfb->pixels + xenfb->offset,
- false);
+ (xenfb->width, xenfb->height, format,
+ xenfb->row_stride, xenfb->pixels + xenfb->offset);
break;
default:
/* we must convert stuff */
diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c
index dd4ea293e2..bd03e99975 100644
--- a/hw/gpio/pl061.c
+++ b/hw/gpio/pl061.c
@@ -37,7 +37,8 @@ typedef struct PL061State {
MemoryRegion iomem;
uint32_t locked;
uint32_t data;
- uint32_t old_data;
+ uint32_t old_out_data;
+ uint32_t old_in_data;
uint32_t dir;
uint32_t isense;
uint32_t ibe;
@@ -63,12 +64,13 @@ typedef struct PL061State {
static const VMStateDescription vmstate_pl061 = {
.name = "pl061",
- .version_id = 2,
- .minimum_version_id = 1,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_UINT32(locked, PL061State),
VMSTATE_UINT32(data, PL061State),
- VMSTATE_UINT32(old_data, PL061State),
+ VMSTATE_UINT32(old_out_data, PL061State),
+ VMSTATE_UINT32(old_in_data, PL061State),
VMSTATE_UINT32(dir, PL061State),
VMSTATE_UINT32(isense, PL061State),
VMSTATE_UINT32(ibe, PL061State),
@@ -98,23 +100,52 @@ static void pl061_update(PL061State *s)
uint8_t out;
int i;
+ DPRINTF("dir = %d, data = %d\n", s->dir, s->data);
+
/* Outputs float high. */
/* FIXME: This is board dependent. */
out = (s->data & s->dir) | ~s->dir;
- changed = s->old_data ^ out;
- if (!changed)
- return;
+ changed = s->old_out_data ^ out;
+ if (changed) {
+ s->old_out_data = out;
+ for (i = 0; i < 8; i++) {
+ mask = 1 << i;
+ if (changed & mask) {
+ DPRINTF("Set output %d = %d\n", i, (out & mask) != 0);
+ qemu_set_irq(s->out[i], (out & mask) != 0);
+ }
+ }
+ }
- s->old_data = out;
- for (i = 0; i < 8; i++) {
- mask = 1 << i;
- if (changed & mask) {
- DPRINTF("Set output %d = %d\n", i, (out & mask) != 0);
- qemu_set_irq(s->out[i], (out & mask) != 0);
+ /* Inputs */
+ changed = (s->old_in_data ^ s->data) & ~s->dir;
+ if (changed) {
+ s->old_in_data = s->data;
+ for (i = 0; i < 8; i++) {
+ mask = 1 << i;
+ if (changed & mask) {
+ DPRINTF("Changed input %d = %d\n", i, (s->data & mask) != 0);
+
+ if (!(s->isense & mask)) {
+ /* Edge interrupt */
+ if (s->ibe & mask) {
+ /* Any edge triggers the interrupt */
+ s->istate |= mask;
+ } else {
+ /* Edge is selected by IEV */
+ s->istate |= ~(s->data ^ s->iev) & mask;
+ }
+ }
+ }
}
}
- /* FIXME: Implement input interrupts. */
+ /* Level interrupt */
+ s->istate |= ~(s->data ^ s->iev) & s->isense;
+
+ DPRINTF("istate = %02X\n", s->istate);
+
+ qemu_set_irq(s->irq, (s->istate & s->im) != 0);
}
static uint64_t pl061_read(void *opaque, hwaddr offset,
diff --git a/hw/ide/core.c b/hw/ide/core.c
index b48127f921..191f89321e 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -75,19 +75,29 @@ static void put_le16(uint16_t *p, unsigned int v)
*p = cpu_to_le16(v);
}
+static void ide_identify_size(IDEState *s)
+{
+ uint16_t *p = (uint16_t *)s->identify_data;
+ put_le16(p + 60, s->nb_sectors);
+ put_le16(p + 61, s->nb_sectors >> 16);
+ put_le16(p + 100, s->nb_sectors);
+ put_le16(p + 101, s->nb_sectors >> 16);
+ put_le16(p + 102, s->nb_sectors >> 32);
+ put_le16(p + 103, s->nb_sectors >> 48);
+}
+
static void ide_identify(IDEState *s)
{
uint16_t *p;
unsigned int oldsize;
IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
+ p = (uint16_t *)s->identify_data;
if (s->identify_set) {
- memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data));
- return;
+ goto fill_buffer;
}
+ memset(p, 0, sizeof(s->identify_data));
- memset(s->io_buffer, 0, 512);
- p = (uint16_t *)s->io_buffer;
put_le16(p + 0, 0x0040);
put_le16(p + 1, s->cylinders);
put_le16(p + 3, s->heads);
@@ -116,8 +126,8 @@ static void ide_identify(IDEState *s)
put_le16(p + 58, oldsize >> 16);
if (s->mult_sectors)
put_le16(p + 59, 0x100 | s->mult_sectors);
- put_le16(p + 60, s->nb_sectors);
- put_le16(p + 61, s->nb_sectors >> 16);
+ /* *(p + 60) := nb_sectors -- see ide_identify_size */
+ /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
put_le16(p + 62, 0x07); /* single word dma0-2 supported */
put_le16(p + 63, 0x07); /* mdma0-2 supported */
put_le16(p + 64, 0x03); /* pio3-4 supported */
@@ -162,10 +172,10 @@ static void ide_identify(IDEState *s)
}
put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
put_le16(p + 93, 1 | (1 << 14) | 0x2000);
- put_le16(p + 100, s->nb_sectors);
- put_le16(p + 101, s->nb_sectors >> 16);
- put_le16(p + 102, s->nb_sectors >> 32);
- put_le16(p + 103, s->nb_sectors >> 48);
+ /* *(p + 100) := nb_sectors -- see ide_identify_size */
+ /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
+ /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
+ /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
if (dev && dev->conf.physical_block_size)
put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
@@ -180,21 +190,23 @@ static void ide_identify(IDEState *s)
put_le16(p + 169, 1); /* TRIM support */
}
- memcpy(s->identify_data, p, sizeof(s->identify_data));
+ ide_identify_size(s);
s->identify_set = 1;
+
+fill_buffer:
+ memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
static void ide_atapi_identify(IDEState *s)
{
uint16_t *p;
+ p = (uint16_t *)s->identify_data;
if (s->identify_set) {
- memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data));
- return;
+ goto fill_buffer;
}
+ memset(p, 0, sizeof(s->identify_data));
- memset(s->io_buffer, 0, 512);
- p = (uint16_t *)s->io_buffer;
/* Removable CDROM, 50us response, 12 byte packets */
put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
@@ -230,11 +242,36 @@ static void ide_atapi_identify(IDEState *s)
}
put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
+ if (s->wwn) {
+ put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
+ put_le16(p + 87, (1 << 8)); /* WWN enabled */
+ }
+
#ifdef USE_DMA_CDROM
put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif
- memcpy(s->identify_data, p, sizeof(s->identify_data));
+
+ if (s->wwn) {
+ /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
+ put_le16(p + 108, s->wwn >> 48);
+ put_le16(p + 109, s->wwn >> 32);
+ put_le16(p + 110, s->wwn >> 16);
+ put_le16(p + 111, s->wwn);
+ }
+
s->identify_set = 1;
+
+fill_buffer:
+ memcpy(s->io_buffer, p, sizeof(s->identify_data));
+}
+
+static void ide_cfata_identify_size(IDEState *s)
+{
+ uint16_t *p = (uint16_t *)s->identify_data;
+ put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
+ put_le16(p + 8, s->nb_sectors); /* Sectors per card */
+ put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
+ put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
}
static void ide_cfata_identify(IDEState *s)
@@ -242,10 +279,10 @@ static void ide_cfata_identify(IDEState *s)
uint16_t *p;
uint32_t cur_sec;
- p = (uint16_t *) s->identify_data;
- if (s->identify_set)
+ p = (uint16_t *)s->identify_data;
+ if (s->identify_set) {
goto fill_buffer;
-
+ }
memset(p, 0, sizeof(s->identify_data));
cur_sec = s->cylinders * s->heads * s->sectors;
@@ -254,8 +291,8 @@ static void ide_cfata_identify(IDEState *s)
put_le16(p + 1, s->cylinders); /* Default cylinders */
put_le16(p + 3, s->heads); /* Default heads */
put_le16(p + 6, s->sectors); /* Default sectors per track */
- put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
- put_le16(p + 8, s->nb_sectors); /* Sectors per card */
+ /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
+ /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
put_le16(p + 22, 0x0004); /* ECC bytes */
padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
@@ -276,8 +313,8 @@ static void ide_cfata_identify(IDEState *s)
put_le16(p + 58, cur_sec >> 16); /* Current capacity */
if (s->mult_sectors) /* Multiple sector setting */
put_le16(p + 59, 0x100 | s->mult_sectors);
- put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
- put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
+ /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
+ /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
put_le16(p + 63, 0x0203); /* Multiword DMA capability */
put_le16(p + 64, 0x0001); /* Flow Control PIO support */
put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
@@ -297,6 +334,7 @@ static void ide_cfata_identify(IDEState *s)
put_le16(p + 160, 0x8100); /* Power requirement */
put_le16(p + 161, 0x8001); /* CF command set */
+ ide_cfata_identify_size(s);
s->identify_set = 1;
fill_buffer:
@@ -2115,6 +2153,28 @@ static bool ide_cd_is_medium_locked(void *opaque)
return ((IDEState *)opaque)->tray_locked;
}
+static void ide_resize_cb(void *opaque)
+{
+ IDEState *s = opaque;
+ uint64_t nb_sectors;
+
+ if (!s->identify_set) {
+ return;
+ }
+
+ bdrv_get_geometry(s->bs, &nb_sectors);
+ s->nb_sectors = nb_sectors;
+
+ /* Update the identify data buffer. */
+ if (s->drive_kind == IDE_CFATA) {
+ ide_cfata_identify_size(s);
+ } else {
+ /* IDE_CD uses a different set of callbacks entirely. */
+ assert(s->drive_kind != IDE_CD);
+ ide_identify_size(s);
+ }
+}
+
static const BlockDevOps ide_cd_block_ops = {
.change_media_cb = ide_cd_change_cb,
.eject_request_cb = ide_cd_eject_request_cb,
@@ -2122,6 +2182,10 @@ static const BlockDevOps ide_cd_block_ops = {
.is_medium_locked = ide_cd_is_medium_locked,
};
+static const BlockDevOps ide_hd_block_ops = {
+ .resize_cb = ide_resize_cb,
+};
+
int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind,
const char *version, const char *serial, const char *model,
uint64_t wwn,
@@ -2158,6 +2222,7 @@ int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind,
error_report("Can't use a read-only drive");
return -1;
}
+ bdrv_set_dev_ops(bs, &ide_hd_block_ops, s);
}
if (serial) {
pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
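
The refactoring above keeps one cached IDENTIFY block per drive and, when the backing image is resized, rewrites only the capacity words through ide_identify_size() / ide_cfata_identify_size(): words 60-61 carry the 28-bit LBA sector count and words 100-103 the 48-bit count, all little endian. A standalone sketch of that "patch only the size words" idea (the names below are illustrative, not the QEMU functions):

#include <stdint.h>
#include <stdio.h>

/* explicit little-endian 16-bit store, independent of host endianness */
static void put_le16(uint16_t *p, unsigned int v)
{
    uint8_t *b = (uint8_t *)p;
    b[0] = v & 0xff;
    b[1] = (v >> 8) & 0xff;
}

/* rewrite only the capacity words of a 256-word IDENTIFY block */
static void identify_update_size(uint16_t *id, uint64_t nb_sectors)
{
    put_le16(id + 60, nb_sectors);
    put_le16(id + 61, nb_sectors >> 16);
    put_le16(id + 100, nb_sectors);
    put_le16(id + 101, nb_sectors >> 16);
    put_le16(id + 102, nb_sectors >> 32);
    put_le16(id + 103, nb_sectors >> 48);
}

int main(void)
{
    uint16_t id[256] = { 0 };

    /* 2 GiB disk of 512-byte sectors */
    identify_update_size(id, 4 * 1024 * 1024ULL);
    printf("capacity words 60/61 and 100-103 updated, rest untouched\n");
    return 0;
}
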
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
index ff6051defe..b4273aa171 100644
--- a/hw/misc/macio/cuda.c
+++ b/hw/misc/macio/cuda.c
@@ -123,13 +123,22 @@ static void cuda_update_irq(CUDAState *s)
}
}
+static uint64_t get_tb(uint64_t freq)
+{
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ freq, get_ticks_per_sec());
+}
+
static unsigned int get_counter(CUDATimer *s)
{
int64_t d;
unsigned int counter;
+ uint64_t tb_diff;
+
+ /* Reverse of the tb calculation algorithm that Mac OS X uses on bootup. */
+ tb_diff = get_tb(s->frequency) - s->load_time;
+ d = (tb_diff * 0xBF401675E5DULL) / (s->frequency << 24);
- d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->load_time,
- CUDA_TIMER_FREQ, get_ticks_per_sec());
if (s->index == 0) {
/* the timer goes down from latch to -1 (period of latch + 2) */
if (d <= (s->counter_value + 1)) {
@@ -147,7 +156,7 @@ static unsigned int get_counter(CUDATimer *s)
static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
- ti->load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ ti->load_time = get_tb(s->frequency);
ti->counter_value = val;
cuda_timer_update(s, ti, ti->load_time);
}
@@ -688,6 +697,8 @@ static void cuda_realizefn(DeviceState *dev, Error **errp)
struct tm tm;
s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
+ s->timers[0].frequency = s->frequency;
+ s->timers[1].frequency = s->frequency;
qemu_get_timedate(&tm, 0);
s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
@@ -713,6 +724,11 @@ static void cuda_initfn(Object *obj)
DEVICE(obj), "adb.0");
}
+static Property cuda_properties[] = {
+ DEFINE_PROP_UINT64("frequency", CUDAState, frequency, 0),
+ DEFINE_PROP_END_OF_LIST()
+};
+
static void cuda_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -720,6 +736,7 @@ static void cuda_class_init(ObjectClass *oc, void *data)
dc->realize = cuda_realizefn;
dc->reset = cuda_reset;
dc->vmsd = &vmstate_cuda;
+ dc->props = cuda_properties;
}
static const TypeInfo cuda_type_info = {
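
get_tb() above converts the QEMU_CLOCK_VIRTUAL nanosecond clock into timebase ticks with muldiv64(ns, freq, get_ticks_per_sec()), i.e. ticks = ns * freq / 10^9 computed with a wide intermediate so the product cannot overflow 64 bits. A hedged standalone sketch of that conversion (the frequency value below is just an example, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* simplified muldiv64(): a * b / c with a 128-bit intermediate (GCC/Clang) */
static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
    uint64_t ns = 2000000000ULL;     /* 2 seconds of virtual time */
    uint32_t tb_freq = 16600000;     /* assumed example timebase frequency */

    /* equivalent of get_tb(tb_freq) at that point in virtual time */
    uint64_t ticks = muldiv64(ns, tb_freq, 1000000000u);
    printf("timebase ticks: %llu\n", (unsigned long long)ticks);
    return 0;
}
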
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
index 47f45f5d2c..e0f1e88f54 100644
--- a/hw/misc/macio/macio.c
+++ b/hw/misc/macio/macio.c
@@ -42,6 +42,7 @@ typedef struct MacIOState
void *dbdma;
MemoryRegion *pic_mem;
MemoryRegion *escc_mem;
+ uint64_t frequency;
} MacIOState;
#define OLDWORLD_MACIO(obj) \
@@ -243,13 +244,18 @@ static void timer_write(void *opaque, hwaddr addr, uint64_t value,
static uint64_t timer_read(void *opaque, hwaddr addr, unsigned size)
{
uint32_t value = 0;
+ uint64_t systime = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint64_t kltime;
+
+ kltime = muldiv64(systime, 4194300, get_ticks_per_sec() * 4);
+ kltime = muldiv64(kltime, 18432000, 1048575);
switch (addr) {
case 0x38:
- value = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ value = kltime;
break;
case 0x3c:
- value = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 32;
+ value = kltime >> 32;
break;
}
@@ -346,12 +352,19 @@ static void macio_newworld_class_init(ObjectClass *oc, void *data)
pdc->device_id = PCI_DEVICE_ID_APPLE_UNI_N_KEYL;
}
+static Property macio_properties[] = {
+ DEFINE_PROP_UINT64("frequency", MacIOState, frequency, 0),
+ DEFINE_PROP_END_OF_LIST()
+};
+
static void macio_class_init(ObjectClass *klass, void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
k->vendor_id = PCI_VENDOR_ID_APPLE;
k->class_id = PCI_CLASS_OTHERS << 8;
+ dc->props = macio_properties;
}
static const TypeInfo macio_oldworld_type_info = {
@@ -398,6 +411,8 @@ void macio_init(PCIDevice *d,
macio_state->escc_mem = escc_mem;
/* Note: this code is strongly inspirated from the corresponding code
in PearPC */
+ qdev_prop_set_uint64(DEVICE(&macio_state->cuda), "frequency",
+ macio_state->frequency);
qdev_init_nofail(DEVICE(d));
}
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index 2d47df6930..23c47d397c 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -72,7 +72,14 @@ typedef uint64_t vlan_bd_t;
#define VLAN_RXQ_BD_OFF 0
#define VLAN_FILTER_BD_OFF 8
#define VLAN_RX_BDS_OFF 16
-#define VLAN_MAX_BUFS ((SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF) / 8)
+/*
+ * The final 8 bytes of the buffer list hold a counter of frames dropped
+ * because there was no buffer in the buffer list capable of holding
+ * the frame. We must avoid overwriting it, or the operating system will
+ * report garbage for this statistic.
+ */
+#define VLAN_RX_BDS_LEN (SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF - 8)
+#define VLAN_MAX_BUFS (VLAN_RX_BDS_LEN / 8)
#define TYPE_VIO_SPAPR_VLAN_DEVICE "spapr-vlan"
#define VIO_SPAPR_VLAN_DEVICE(obj) \
@@ -119,7 +126,7 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
do {
buf_ptr += 8;
- if (buf_ptr >= SPAPR_TCE_PAGE_SIZE) {
+ if (buf_ptr >= (VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF)) {
buf_ptr = VLAN_RX_BDS_OFF;
}
@@ -397,7 +404,7 @@ static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
do {
dev->add_buf_ptr += 8;
- if (dev->add_buf_ptr >= SPAPR_TCE_PAGE_SIZE) {
+ if (dev->add_buf_ptr >= (VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF)) {
dev->add_buf_ptr = VLAN_RX_BDS_OFF;
}
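
The arithmetic behind the new bound: the receive buffer list lives in one TCE page, descriptors start at offset 16, and the last 8 bytes hold the dropped-frame counter. Assuming the usual 4096-byte SPAPR_TCE_PAGE_SIZE (the macro's value is not shown in this hunk), that leaves 4096 - 16 - 8 = 4072 bytes, i.e. 509 eight-byte descriptors instead of the 510 the old bound allowed. A tiny sketch of the same computation:

#include <stdio.h>

#define SPAPR_TCE_PAGE_SIZE 4096   /* assumed value, not shown in this hunk */
#define VLAN_RX_BDS_OFF     16
#define VLAN_RX_BDS_LEN     (SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF - 8)
#define VLAN_MAX_BUFS       (VLAN_RX_BDS_LEN / 8)

int main(void)
{
    /* 4096 - 16 - 8 = 4072 bytes of descriptors -> 509 entries; the old
     * bound allowed 510 and let the list run into the counter word. */
    printf("VLAN_RX_BDS_LEN=%d VLAN_MAX_BUFS=%d\n",
           VLAN_RX_BDS_LEN, VLAN_MAX_BUFS);
    return 0;
}
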
diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c
index 170b10b766..d35f8a3121 100644
--- a/hw/nvram/mac_nvram.c
+++ b/hw/nvram/mac_nvram.c
@@ -26,6 +26,7 @@
#include "hw/nvram/openbios_firmware_abi.h"
#include "sysemu/sysemu.h"
#include "hw/ppc/mac.h"
+#include <zlib.h>
/* debug NVR */
//#define DEBUG_NVR
@@ -39,29 +40,6 @@
#define DEF_SYSTEM_SIZE 0xc10
-/* Direct access to NVRAM */
-uint8_t macio_nvram_read(MacIONVRAMState *s, uint32_t addr)
-{
- uint32_t ret;
-
- if (addr < s->size) {
- ret = s->data[addr];
- } else {
- ret = -1;
- }
- NVR_DPRINTF("read addr %04" PRIx32 " val %" PRIx8 "\n", addr, ret);
-
- return ret;
-}
-
-void macio_nvram_write(MacIONVRAMState *s, uint32_t addr, uint8_t val)
-{
- NVR_DPRINTF("write addr %04" PRIx32 " val %" PRIx8 "\n", addr, val);
- if (addr < s->size) {
- s->data[addr] = val;
- }
-}
-
/* macio style NVRAM device */
static void macio_nvram_writeb(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
@@ -89,6 +67,10 @@ static uint64_t macio_nvram_readb(void *opaque, hwaddr addr,
static const MemoryRegionOps macio_nvram_ops = {
.read = macio_nvram_readb,
.write = macio_nvram_writeb,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 1,
.endianness = DEVICE_BIG_ENDIAN,
};
@@ -156,15 +138,16 @@ static void macio_nvram_register_types(void)
}
/* Set up a system OpenBIOS NVRAM partition */
-void pmac_format_nvram_partition (MacIONVRAMState *nvr, int len)
+static void pmac_format_nvram_partition_of(MacIONVRAMState *nvr, int off,
+ int len)
{
unsigned int i;
- uint32_t start = 0, end;
+ uint32_t start = off, end;
struct OpenBIOS_nvpart_v1 *part_header;
// OpenBIOS nvram variables
// Variable partition
- part_header = (struct OpenBIOS_nvpart_v1 *)nvr->data;
+ part_header = (struct OpenBIOS_nvpart_v1 *)&nvr->data[start];
part_header->signature = OPENBIOS_PART_SYSTEM;
pstrcpy(part_header->name, sizeof(part_header->name), "system");
@@ -192,4 +175,39 @@ void pmac_format_nvram_partition (MacIONVRAMState *nvr, int len)
OpenBIOS_finish_partition(part_header, end - start);
}
+#define OSX_NVRAM_SIGNATURE (0x5A)
+
+/* Set up a Mac OS X NVRAM partition */
+static void pmac_format_nvram_partition_osx(MacIONVRAMState *nvr, int off,
+ int len)
+{
+ uint32_t start = off;
+ struct OpenBIOS_nvpart_v1 *part_header;
+ unsigned char *data = &nvr->data[start];
+
+ /* empty partition */
+ part_header = (struct OpenBIOS_nvpart_v1 *)data;
+ part_header->signature = OSX_NVRAM_SIGNATURE;
+ pstrcpy(part_header->name, sizeof(part_header->name), "wwwwwwwwwwww");
+
+ OpenBIOS_finish_partition(part_header, len);
+
+ /* Generation */
+ stl_be_p(&data[20], 2);
+
+ /* Adler32 checksum */
+ stl_be_p(&data[16], adler32(0, &data[20], len - 20));
+}
+
+/* Set up NVRAM with OF and OSX partitions */
+void pmac_format_nvram_partition(MacIONVRAMState *nvr, int len)
+{
+ /*
+ * Mac OS X expects side "B" of the flash at the second half of NVRAM,
+ * so we use half of the chip for OF and the other half for a free OSX
+ * partition.
+ */
+ pmac_format_nvram_partition_of(nvr, 0, len / 2);
+ pmac_format_nvram_partition_osx(nvr, len / 2, len / 2);
+}
type_init(macio_nvram_register_types)
diff --git a/hw/pci-host/apb.c b/hw/pci-host/apb.c
index 762ebdd8fe..f573875baf 100644
--- a/hw/pci-host/apb.c
+++ b/hw/pci-host/apb.c
@@ -142,6 +142,7 @@ typedef struct APBState {
IOMMUState iommu;
uint32_t pci_control[16];
uint32_t pci_irq_map[8];
+ uint32_t pci_err_irq_map[4];
uint32_t obio_irq_map[32];
qemu_irq *pbm_irqs;
qemu_irq *ivec_irqs;
@@ -437,7 +438,7 @@ static void apb_config_writel (void *opaque, hwaddr addr,
pbm_check_irqs(s);
}
break;
- case 0x1000 ... 0x1080: /* OBIO interrupt control */
+ case 0x1000 ... 0x107f: /* OBIO interrupt control */
if (addr & 4) {
unsigned int ino = ((addr & 0xff) >> 3);
s->obio_irq_map[ino] &= PBM_PCI_IMR_MASK;
@@ -515,13 +516,20 @@ static uint64_t apb_config_readl (void *opaque,
val = 0;
}
break;
- case 0x1000 ... 0x1080: /* OBIO interrupt control */
+ case 0x1000 ... 0x107f: /* OBIO interrupt control */
if (addr & 4) {
val = s->obio_irq_map[(addr & 0xff) >> 3];
} else {
val = 0;
}
break;
+ case 0x1080 ... 0x108f: /* PCI bus error */
+ if (addr & 4) {
+ val = s->pci_err_irq_map[(addr & 0xf) >> 3];
+ } else {
+ val = 0;
+ }
+ break;
case 0x2000 ... 0x202f: /* PCI control */
val = s->pci_control[(addr & 0x3f) >> 2];
break;
@@ -753,6 +761,9 @@ static int pci_pbm_init_device(SysBusDevice *dev)
for (i = 0; i < 8; i++) {
s->pci_irq_map[i] = (0x1f << 6) | (i << 2);
}
+ for (i = 0; i < 2; i++) {
+ s->pci_err_irq_map[i] = (0x1f << 6) | 0x30;
+ }
for (i = 0; i < 32; i++) {
s->obio_irq_map[i] = ((0x1f << 6) | 0x20) + i;
}
diff --git a/hw/ppc/mac.h b/hw/ppc/mac.h
index c1faf9ce27..aff2b9a566 100644
--- a/hw/ppc/mac.h
+++ b/hw/ppc/mac.h
@@ -57,6 +57,7 @@ typedef struct CUDATimer {
uint16_t counter_value;
int64_t load_time;
int64_t next_irq_time;
+ uint64_t frequency;
QEMUTimer *timer;
} CUDATimer;
@@ -97,6 +98,7 @@ typedef struct CUDAState {
CUDATimer timers[2];
uint32_t tick_offset;
+ uint64_t frequency;
uint8_t last_b;
uint8_t last_acr;
@@ -178,6 +180,4 @@ typedef struct MacIONVRAMState {
} MacIONVRAMState;
void pmac_format_nvram_partition (MacIONVRAMState *nvr, int len);
-uint8_t macio_nvram_read(MacIONVRAMState *s, uint32_t addr);
-void macio_nvram_write(MacIONVRAMState *s, uint32_t addr, uint8_t val);
#endif /* !defined(__PPC_MAC_H__) */
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index 7148629a0f..1626db44ef 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -176,6 +176,8 @@ static void ppc_core99_init(MachineState *machine)
SysBusDevice *s;
DeviceState *dev;
int *token = g_new(int, 1);
+ hwaddr nvram_addr = 0xFFF04000;
+ uint64_t tbfreq;
linux_boot = (kernel_filename != NULL);
@@ -373,6 +375,14 @@ static void ppc_core99_init(MachineState *machine)
pci_bus = pci_pmac_init(pic, get_system_memory(), get_system_io());
machine_arch = ARCH_MAC99;
}
+
+ /* Timebase Frequency */
+ if (kvm_enabled()) {
+ tbfreq = kvmppc_get_tbfreq();
+ } else {
+ tbfreq = TBFREQ;
+ }
+
/* init basic PC hardware */
escc_mem = escc_init(0, pic[0x25], pic[0x24],
serial_hds[0], serial_hds[1], ESCC_CLOCK, 4);
@@ -386,6 +396,7 @@ static void ppc_core99_init(MachineState *machine)
qdev_connect_gpio_out(dev, 2, pic[0x02]); /* IDE DMA */
qdev_connect_gpio_out(dev, 3, pic[0x0e]); /* IDE */
qdev_connect_gpio_out(dev, 4, pic[0x03]); /* IDE DMA */
+ qdev_prop_set_uint64(dev, "frequency", tbfreq);
macio_init(macio, pic_mem, escc_bar);
/* We only emulate 2 out of 3 IDE controllers for now */
@@ -427,11 +438,18 @@ static void ppc_core99_init(MachineState *machine)
}
/* The NewWorld NVRAM is not located in the MacIO device */
+#ifdef CONFIG_KVM
+ if (kvm_enabled() && getpagesize() > 4096) {
+ /* We can't combine read-write and read-only in a single page, so
+ move the NVRAM out of ROM again for KVM */
+ nvram_addr = 0xFFE00000;
+ }
+#endif
dev = qdev_create(NULL, TYPE_MACIO_NVRAM);
qdev_prop_set_uint32(dev, "size", 0x2000);
qdev_prop_set_uint32(dev, "it_shift", 1);
qdev_init_nofail(dev);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xFFF04000);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, nvram_addr);
nvr = MACIO_NVRAM(dev);
pmac_format_nvram_partition(nvr, 0x2000);
/* No PCI init: the BIOS will do it */
@@ -462,28 +480,34 @@ static void ppc_core99_init(MachineState *machine)
#ifdef CONFIG_KVM
uint8_t *hypercall;
- fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq());
hypercall = g_malloc(16);
kvmppc_get_hypercall(env, hypercall, 16);
fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16);
fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid());
#endif
- } else {
- fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, TBFREQ);
}
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq);
/* Mac OS X requires a "known good" clock-frequency value; pass it one. */
fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ);
fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_NVRAM_ADDR, nvram_addr);
qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
}
+static int core99_kvm_type(const char *arg)
+{
+ /* Always force PR KVM */
+ return 2;
+}
+
static QEMUMachine core99_machine = {
.name = "mac99",
.desc = "Mac99 based PowerMAC",
.init = ppc_core99_init,
.max_cpus = MAX_CPUS,
.default_boot_order = "cd",
+ .kvm_type = core99_kvm_type,
};
static void core99_machine_init(void)
diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c
index 1c5d9f45ef..be9a194038 100644
--- a/hw/ppc/mac_oldworld.c
+++ b/hw/ppc/mac_oldworld.c
@@ -103,6 +103,7 @@ static void ppc_heathrow_init(MachineState *machine)
uint16_t ppc_boot_device;
DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
void *fw_cfg;
+ uint64_t tbfreq;
linux_boot = (kernel_filename != NULL);
@@ -251,6 +252,13 @@ static void ppc_heathrow_init(MachineState *machine)
}
}
+ /* Timebase Frequency */
+ if (kvm_enabled()) {
+ tbfreq = kvmppc_get_tbfreq();
+ } else {
+ tbfreq = TBFREQ;
+ }
+
/* init basic PC hardware */
if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) {
hw_error("Only 6xx bus is supported on heathrow machine\n");
@@ -279,6 +287,7 @@ static void ppc_heathrow_init(MachineState *machine)
qdev_connect_gpio_out(dev, 2, pic[0x02]); /* IDE-0 DMA */
qdev_connect_gpio_out(dev, 3, pic[0x0E]); /* IDE-1 */
qdev_connect_gpio_out(dev, 4, pic[0x03]); /* IDE-1 DMA */
+ qdev_prop_set_uint64(dev, "frequency", tbfreq);
macio_init(macio, pic_mem, escc_bar);
macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio),
@@ -331,15 +340,13 @@ static void ppc_heathrow_init(MachineState *machine)
#ifdef CONFIG_KVM
uint8_t *hypercall;
- fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq());
hypercall = g_malloc(16);
kvmppc_get_hypercall(env, hypercall, 16);
fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16);
fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid());
#endif
- } else {
- fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, TBFREQ);
}
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq);
/* Mac OS X requires a "known good" clock-frequency value; pass it one. */
fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ);
fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ);
@@ -347,6 +354,12 @@ static void ppc_heathrow_init(MachineState *machine)
qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
}
+static int heathrow_kvm_type(const char *arg)
+{
+ /* Always force PR KVM */
+ return 2;
+}
+
static QEMUMachine heathrow_machine = {
.name = "g3beige",
.desc = "Heathrow based PowerMAC",
@@ -356,6 +369,7 @@ static QEMUMachine heathrow_machine = {
.is_default = 1,
#endif
.default_boot_order = "cd", /* TOFIX "cad" when Mac floppy is implemented */
+ .kvm_type = heathrow_kvm_type,
};
static void heathrow_machine_init(void)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 5cb452f234..2ab4460f04 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -71,6 +71,7 @@
*/
#define FDT_MAX_SIZE 0x40000
#define RTAS_MAX_SIZE 0x10000
+#define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */
#define FW_MAX_SIZE 0x400000
#define FW_FILE_NAME "slof.bin"
#define FW_OVERHEAD 0x2800000
@@ -80,7 +81,7 @@
#define TIMEBASE_FREQ 512000000ULL
-#define MAX_CPUS 256
+#define MAX_CPUS 255
#define PHANDLE_XICP 0x00001111
@@ -283,6 +284,19 @@ static size_t create_page_sizes_prop(CPUPPCState *env, uint32_t *prop,
return (p - prop) * sizeof(uint32_t);
}
+static hwaddr spapr_node0_size(void)
+{
+ if (nb_numa_nodes) {
+ int i;
+ for (i = 0; i < nb_numa_nodes; ++i) {
+ if (numa_info[i].node_mem) {
+ return MIN(pow2floor(numa_info[i].node_mem), ram_size);
+ }
+ }
+ }
+ return ram_size;
+}
+
#define _FDT(exp) \
do { \
int ret = (exp); \
@@ -319,6 +333,7 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
QemuOpts *opts = qemu_opts_find(qemu_find_opts("smp-opts"), NULL);
unsigned sockets = opts ? qemu_opt_get_number(opts, "sockets", 0) : 0;
uint32_t cpus_per_socket = sockets ? (smp_cpus / sockets) : 1;
+ char *buf;
add_str(hypertas, "hcall-pft");
add_str(hypertas, "hcall-term");
@@ -348,6 +363,29 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
_FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)")));
_FDT((fdt_property_string(fdt, "compatible", "qemu,pseries")));
+ /*
+     * Add info to the guest to identify which host it is being run on
+     * and what the uuid of the guest is
+ */
+ if (kvmppc_get_host_model(&buf)) {
+ _FDT((fdt_property_string(fdt, "host-model", buf)));
+ g_free(buf);
+ }
+ if (kvmppc_get_host_serial(&buf)) {
+ _FDT((fdt_property_string(fdt, "host-serial", buf)));
+ g_free(buf);
+ }
+
+ buf = g_strdup_printf(UUID_FMT, qemu_uuid[0], qemu_uuid[1],
+ qemu_uuid[2], qemu_uuid[3], qemu_uuid[4],
+ qemu_uuid[5], qemu_uuid[6], qemu_uuid[7],
+ qemu_uuid[8], qemu_uuid[9], qemu_uuid[10],
+ qemu_uuid[11], qemu_uuid[12], qemu_uuid[13],
+ qemu_uuid[14], qemu_uuid[15]);
+
+ _FDT((fdt_property_string(fdt, "vm,uuid", buf)));
+ g_free(buf);
+
_FDT((fdt_property_cell(fdt, "#address-cells", 0x2)));
_FDT((fdt_property_cell(fdt, "#size-cells", 0x2)));
@@ -502,6 +540,15 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
_FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
+ /*
+     * According to PAPR, the rtas ibm,os-term call does not guarantee a
+     * return to the guest cpu.
+     *
+     * The additional ibm,extended-os-term property indicates that the rtas
+     * call will always return. Set this property.
+ */
+ _FDT((fdt_property(fdt, "ibm,extended-os-term", NULL, 0)));
+
_FDT((fdt_end_node(fdt)));
/* interrupt controller */
@@ -597,72 +644,75 @@ int spapr_h_cas_compose_response(target_ulong addr, target_ulong size)
return 0;
}
-static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt)
+static void spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
+ hwaddr size)
{
- uint32_t associativity[] = {cpu_to_be32(0x4), cpu_to_be32(0x0),
- cpu_to_be32(0x0), cpu_to_be32(0x0),
- cpu_to_be32(0x0)};
+ uint32_t associativity[] = {
+ cpu_to_be32(0x4), /* length */
+ cpu_to_be32(0x0), cpu_to_be32(0x0),
+ cpu_to_be32(0x0), cpu_to_be32(nodeid)
+ };
char mem_name[32];
- hwaddr node0_size, mem_start, node_size;
uint64_t mem_reg_property[2];
- int i, off;
+ int off;
- /* memory node(s) */
- if (nb_numa_nodes > 1 && numa_info[0].node_mem < ram_size) {
- node0_size = numa_info[0].node_mem;
- } else {
- node0_size = ram_size;
- }
+ mem_reg_property[0] = cpu_to_be64(start);
+ mem_reg_property[1] = cpu_to_be64(size);
- /* RMA */
- mem_reg_property[0] = 0;
- mem_reg_property[1] = cpu_to_be64(spapr->rma_size);
- off = fdt_add_subnode(fdt, 0, "memory@0");
+ sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
+ off = fdt_add_subnode(fdt, 0, mem_name);
_FDT(off);
_FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
_FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
sizeof(mem_reg_property))));
_FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
sizeof(associativity))));
+}
+
+static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt)
+{
+ hwaddr mem_start, node_size;
+ int i, nb_nodes = nb_numa_nodes;
+ NodeInfo *nodes = numa_info;
+ NodeInfo ramnode;
+
+    /* No NUMA nodes, assume there is just one node with all of RAM */
+ if (!nb_numa_nodes) {
+ nb_nodes = 1;
+ ramnode.node_mem = ram_size;
+ nodes = &ramnode;
+ }
- /* RAM: Node 0 */
- if (node0_size > spapr->rma_size) {
- mem_reg_property[0] = cpu_to_be64(spapr->rma_size);
- mem_reg_property[1] = cpu_to_be64(node0_size - spapr->rma_size);
-
- sprintf(mem_name, "memory@" TARGET_FMT_lx, spapr->rma_size);
- off = fdt_add_subnode(fdt, 0, mem_name);
- _FDT(off);
- _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
- _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
- sizeof(mem_reg_property))));
- _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
- sizeof(associativity))));
- }
-
- /* RAM: Node 1 and beyond */
- mem_start = node0_size;
- for (i = 1; i < nb_numa_nodes; i++) {
- mem_reg_property[0] = cpu_to_be64(mem_start);
+ for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
+ if (!nodes[i].node_mem) {
+ continue;
+ }
if (mem_start >= ram_size) {
node_size = 0;
} else {
- node_size = numa_info[i].node_mem;
+ node_size = nodes[i].node_mem;
if (node_size > ram_size - mem_start) {
node_size = ram_size - mem_start;
}
}
- mem_reg_property[1] = cpu_to_be64(node_size);
- associativity[3] = associativity[4] = cpu_to_be32(i);
- sprintf(mem_name, "memory@" TARGET_FMT_lx, mem_start);
- off = fdt_add_subnode(fdt, 0, mem_name);
- _FDT(off);
- _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
- _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
- sizeof(mem_reg_property))));
- _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
- sizeof(associativity))));
- mem_start += node_size;
+ if (!mem_start) {
+ /* ppc_spapr_init() checks for rma_size <= node0_size already */
+ spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
+ mem_start += spapr->rma_size;
+ node_size -= spapr->rma_size;
+ }
+ for ( ; node_size; ) {
+ hwaddr sizetmp = pow2floor(node_size);
+
+ /* mem_start != 0 here */
+ if (ctzl(mem_start) < ctzl(sizetmp)) {
+ sizetmp = 1ULL << ctzl(mem_start);
+ }
+
+ spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
+ node_size -= sizetmp;
+ mem_start += sizetmp;
+ }
}
return 0;
@@ -746,6 +796,7 @@ static void spapr_finalize_fdt(sPAPREnvironment *spapr,
cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
+ g_free(bootlist);
g_free(fdt);
}
@@ -792,25 +843,38 @@ static void spapr_reset_htab(sPAPREnvironment *spapr)
/* Update the RMA size if necessary */
if (spapr->vrma_adjust) {
- hwaddr node0_size = (nb_numa_nodes > 1) ?
- numa_info[0].node_mem : ram_size;
- spapr->rma_size = kvmppc_rma_size(node0_size, spapr->htab_shift);
+ spapr->rma_size = kvmppc_rma_size(spapr_node0_size(),
+ spapr->htab_shift);
}
}
static void ppc_spapr_reset(void)
{
PowerPCCPU *first_ppc_cpu;
+ uint32_t rtas_limit;
/* Reset the hash table & recalc the RMA */
spapr_reset_htab(spapr);
qemu_devices_reset();
+ /*
+ * We place the device tree and RTAS just below either the top of the RMA,
+     * or just below 2GB, whichever is lower, so that it can be
+ * processed with 32-bit real mode code if necessary
+ */
+ rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
+ spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
+ spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
+
/* Load the fdt */
spapr_finalize_fdt(spapr, spapr->fdt_addr, spapr->rtas_addr,
spapr->rtas_size);
+ /* Copy RTAS over */
+ cpu_physical_memory_write(spapr->rtas_addr, spapr->rtas_blob,
+ spapr->rtas_size);
+
/* Set up the entry state */
first_ppc_cpu = POWERPC_CPU(first_cpu);
first_ppc_cpu->env.gpr[3] = spapr->fdt_addr;
@@ -1227,10 +1291,10 @@ static void ppc_spapr_init(MachineState *machine)
MemoryRegion *rma_region;
void *rma = NULL;
hwaddr rma_alloc_size;
- hwaddr node0_size = (nb_numa_nodes > 1) ? numa_info[0].node_mem : ram_size;
+ hwaddr node0_size = spapr_node0_size();
uint32_t initrd_base = 0;
long kernel_size = 0, initrd_size = 0;
- long load_limit, rtas_limit, fw_size;
+ long load_limit, fw_size;
bool kernel_le = false;
char *filename;
@@ -1275,13 +1339,8 @@ static void ppc_spapr_init(MachineState *machine)
exit(1);
}
- /* We place the device tree and RTAS just below either the top of the RMA,
- * or just below 2GB, whichever is lowere, so that it can be
- * processed with 32-bit real mode code if necessary */
- rtas_limit = MIN(spapr->rma_size, 0x80000000);
- spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
- spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
- load_limit = spapr->fdt_addr - FW_OVERHEAD;
+    /* Set up a load limit for the ramdisk, leaving room for SLOF and FDT */
+ load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
/* We aim for a hash table of size 1/128 the size of RAM. The
* normal rule of thumb is 1/64 the size of RAM, but that's much
@@ -1349,14 +1408,14 @@ static void ppc_spapr_init(MachineState *machine)
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
- spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr,
- rtas_limit - spapr->rtas_addr);
- if (spapr->rtas_size < 0) {
+ spapr->rtas_size = get_image_size(filename);
+ spapr->rtas_blob = g_malloc(spapr->rtas_size);
+ if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
exit(1);
}
if (spapr->rtas_size > RTAS_MAX_SIZE) {
- hw_error("RTAS too big ! 0x%lx bytes (max is 0x%x)\n",
+ hw_error("RTAS too big ! 0x%zx bytes (max is 0x%x)\n",
spapr->rtas_size, RTAS_MAX_SIZE);
exit(1);
}
@@ -1378,7 +1437,6 @@ static void ppc_spapr_init(MachineState *machine)
spapr_create_nvram(spapr);
/* Set up PCI */
- spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW);
spapr_pci_rtas_init();
phb = spapr_create_phb(spapr, 0);
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 467858ce05..86514472fe 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -712,10 +712,10 @@ static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
return H_SUCCESS;
}
-static target_ulong h_set_mode_resouce_le(PowerPCCPU *cpu,
- target_ulong mflags,
- target_ulong value1,
- target_ulong value2)
+static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
+ target_ulong mflags,
+ target_ulong value1,
+ target_ulong value2)
{
CPUState *cs;
@@ -743,10 +743,10 @@ static target_ulong h_set_mode_resouce_le(PowerPCCPU *cpu,
return H_UNSUPPORTED_FLAG;
}
-static target_ulong h_set_mode_resouce_addr_trans_mode(PowerPCCPU *cpu,
- target_ulong mflags,
- target_ulong value1,
- target_ulong value2)
+static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
+ target_ulong mflags,
+ target_ulong value1,
+ target_ulong value2)
{
CPUState *cs;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
@@ -794,11 +794,11 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
switch (resource) {
case H_SET_MODE_RESOURCE_LE:
- ret = h_set_mode_resouce_le(cpu, args[0], args[2], args[3]);
+ ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
break;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
- ret = h_set_mode_resouce_addr_trans_mode(cpu, args[0],
- args[2], args[3]);
+ ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
+ args[2], args[3]);
break;
}
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 9ed39a93b7..ad0da7fdc4 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -262,7 +262,6 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
unsigned int irq, max_irqs = 0, num = 0;
sPAPRPHBState *phb = NULL;
PCIDevice *pdev = NULL;
- bool msix = false;
spapr_pci_msi *msi;
int *config_addr_key;
@@ -300,7 +299,12 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
xics_free(spapr->icp, msi->first_irq, msi->num);
- spapr_msi_setmsg(pdev, 0, msix, 0, num);
+ if (msi_present(pdev)) {
+ spapr_msi_setmsg(pdev, 0, false, 0, num);
+ }
+ if (msix_present(pdev)) {
+ spapr_msi_setmsg(pdev, 0, true, 0, num);
+ }
g_hash_table_remove(phb->msi, &config_addr);
trace_spapr_pci_msi("Released MSIs", config_addr);
@@ -341,7 +345,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
/* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
- spapr_msi_setmsg(pdev, spapr->msi_win_addr, ret_intr_type == RTAS_TYPE_MSIX,
+ spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
irq, req_num);
/* Add MSI device to cache */
@@ -465,34 +469,6 @@ static const MemoryRegionOps spapr_msi_ops = {
.endianness = DEVICE_LITTLE_ENDIAN
};
-void spapr_pci_msi_init(sPAPREnvironment *spapr, hwaddr addr)
-{
- uint64_t window_size = 4096;
-
- /*
- * As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors,
- * we need to allocate some memory to catch those writes coming
- * from msi_notify()/msix_notify().
- * As MSIMessage:addr is going to be the same and MSIMessage:data
- * is going to be a VIRQ number, 4 bytes of the MSI MR will only
- * be used.
- *
- * For KVM we want to ensure that this memory is a full page so that
- * our memory slot is of page size granularity.
- */
-#ifdef CONFIG_KVM
- if (kvm_enabled()) {
- window_size = getpagesize();
- }
-#endif
-
- spapr->msi_win_addr = addr;
- memory_region_init_io(&spapr->msiwindow, NULL, &spapr_msi_ops, spapr,
- "msi", window_size);
- memory_region_add_subregion(get_system_memory(), spapr->msi_win_addr,
- &spapr->msiwindow);
-}
-
/*
* PHB PCI device
*/
@@ -512,6 +488,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
char *namebuf;
int i;
PCIBus *bus;
+ uint64_t msi_window_size = 4096;
if (sphb->index != -1) {
hwaddr windows_base;
@@ -604,6 +581,28 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
address_space_init(&sphb->iommu_as, &sphb->iommu_root,
sphb->dtbusname);
+ /*
+ * As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors,
+ * we need to allocate some memory to catch those writes coming
+ * from msi_notify()/msix_notify().
+ * As MSIMessage:addr is going to be the same and MSIMessage:data
+ * is going to be a VIRQ number, 4 bytes of the MSI MR will only
+ * be used.
+ *
+ * For KVM we want to ensure that this memory is a full page so that
+ * our memory slot is of page size granularity.
+ */
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ msi_window_size = getpagesize();
+ }
+#endif
+
+ memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
+ "msi", msi_window_size);
+ memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
+ &sphb->msiwindow);
+
pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index 9ba1ba69f9..2ec2a8e4d1 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -277,6 +277,19 @@ static void rtas_ibm_set_system_parameter(PowerPCCPU *cpu,
rtas_st(rets, 0, ret);
}
+static void rtas_ibm_os_term(PowerPCCPU *cpu,
+ sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong ret = 0;
+
+ qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, &error_abort);
+
+ rtas_st(rets, 0, ret);
+}
+
static struct rtas_call {
const char *name;
spapr_rtas_fn fn;
@@ -404,6 +417,8 @@ static void core_rtas_register_types(void)
spapr_rtas_register(RTAS_IBM_SET_SYSTEM_PARAMETER,
"ibm,set-system-parameter",
rtas_ibm_set_system_parameter);
+ spapr_rtas_register(RTAS_IBM_OS_TERM, "ibm,os-term",
+ rtas_ibm_os_term);
}
type_init(core_rtas_register_types)
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index bbe4c5fb85..73ced1f5f8 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -499,6 +499,7 @@ enum xhci_flags {
XHCI_FLAG_USE_MSI = 1,
XHCI_FLAG_USE_MSI_X,
XHCI_FLAG_SS_FIRST,
+ XHCI_FLAG_FORCE_PCIE_ENDCAP,
};
static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
@@ -3626,7 +3627,8 @@ static int usb_xhci_initfn(struct PCIDevice *dev)
PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64,
&xhci->mem);
- if (pci_bus_is_express(dev->bus)) {
+ if (pci_bus_is_express(dev->bus) ||
+ xhci_get_flag(xhci, XHCI_FLAG_FORCE_PCIE_ENDCAP)) {
ret = pcie_endpoint_cap_init(dev, 0xa0);
assert(ret >= 0);
}
@@ -3855,6 +3857,8 @@ static Property xhci_properties[] = {
DEFINE_PROP_BIT("msix", XHCIState, flags, XHCI_FLAG_USE_MSI_X, true),
DEFINE_PROP_BIT("superspeed-ports-first",
XHCIState, flags, XHCI_FLAG_SS_FIRST, true),
+ DEFINE_PROP_BIT("force-pcie-endcap", XHCIState, flags,
+ XHCI_FLAG_FORCE_PCIE_ENDCAP, false),
DEFINE_PROP_UINT32("intrs", XHCIState, numintrs, MAXINTRS),
DEFINE_PROP_UINT32("slots", XHCIState, numslots, MAXSLOTS),
DEFINE_PROP_UINT32("p2", XHCIState, numports_2, 4),
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 5e5d86ec46..421a142a0d 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -356,10 +356,6 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif
-typedef void (CPUDebugExcpHandler)(CPUArchState *env);
-
-void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
-
/* vl.c */
extern int singlestep;
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index a39cb42290..77316d55c8 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -330,6 +330,11 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.value = "off",\
},\
{\
+ .driver = "nec-usb-xhci",\
+ .property = "force-pcie-endcap",\
+ .value = "on",\
+ },\
+ {\
.driver = "pci-serial",\
.property = "prog_if",\
.value = stringify(0),\
diff --git a/include/hw/loader.h b/include/hw/loader.h
index 00c9117ee0..9190387b8e 100644
--- a/include/hw/loader.h
+++ b/include/hw/loader.h
@@ -13,6 +13,7 @@
*/
int get_image_size(const char *filename);
int load_image(const char *filename, uint8_t *addr); /* deprecated */
+ssize_t load_image_size(const char *filename, void *addr, size_t size);
int load_image_targphys(const char *filename, hwaddr,
uint64_t max_sz);
int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz);
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 32f0aa7d10..4ea2a0d14a 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -70,7 +70,7 @@ struct sPAPRPHBState {
MemoryRegion memspace, iospace;
hwaddr mem_win_addr, mem_win_size, io_win_addr, io_win_size;
- MemoryRegion memwindow, iowindow;
+ MemoryRegion memwindow, iowindow, msiwindow;
uint32_t dma_liobn;
AddressSpace iommu_as;
diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h
index 7e16e2e06c..14efd0ca31 100644
--- a/include/hw/ppc/ppc.h
+++ b/include/hw/ppc/ppc.h
@@ -92,7 +92,7 @@ enum {
#define FW_CFG_PPC_IS_KVM (FW_CFG_ARCH_LOCAL + 0x05)
#define FW_CFG_PPC_KVM_HC (FW_CFG_ARCH_LOCAL + 0x06)
#define FW_CFG_PPC_KVM_PID (FW_CFG_ARCH_LOCAL + 0x07)
-/* OpenBIOS has FW_CFG_PPC_NVRAM_ADDR as +0x08 */
+#define FW_CFG_PPC_NVRAM_ADDR (FW_CFG_ARCH_LOCAL + 0x08)
#define FW_CFG_PPC_BUSFREQ (FW_CFG_ARCH_LOCAL + 0x09)
#define PPC_SERIAL_MM_BAUDBASE 399193
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index bbba51a703..749daf4dd7 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -13,8 +13,6 @@ struct sPAPRNVRAM;
typedef struct sPAPREnvironment {
struct VIOsPAPRBus *vio_bus;
QLIST_HEAD(, sPAPRPHBState) phbs;
- hwaddr msi_win_addr;
- MemoryRegion msiwindow;
struct sPAPRNVRAM *nvram;
XICSState *icp;
@@ -24,7 +22,8 @@ typedef struct sPAPREnvironment {
hwaddr rma_size;
int vrma_adjust;
hwaddr fdt_addr, rtas_addr;
- long rtas_size;
+ ssize_t rtas_size;
+ void *rtas_blob;
void *fdt_skel;
target_ulong entry_point;
uint64_t rtc_offset;
@@ -382,9 +381,8 @@ int spapr_allocate_irq_block(int num, bool lsi, bool msi);
#define RTAS_GET_SENSOR_STATE (RTAS_TOKEN_BASE + 0x1D)
#define RTAS_IBM_CONFIGURE_CONNECTOR (RTAS_TOKEN_BASE + 0x1E)
#define RTAS_IBM_OS_TERM (RTAS_TOKEN_BASE + 0x1F)
-#define RTAS_IBM_EXTENDED_OS_TERM (RTAS_TOKEN_BASE + 0x20)
-#define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x21)
+#define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x20)
/* RTAS ibm,get-system-parameter token values */
#define RTAS_SYSPARM_SPLPAR_CHARACTERISTICS 20
diff --git a/include/qapi/util.h b/include/qapi/util.h
new file mode 100644
index 0000000000..de9238bf95
--- /dev/null
+++ b/include/qapi/util.h
@@ -0,0 +1,17 @@
+/*
+ * QAPI util functions
+ *
+ * Copyright Fujitsu, Inc. 2014
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QAPI_UTIL_H
+#define QAPI_UTIL_H
+
+int qapi_enum_parse(const char *lookup[], const char *buf,
+ int max, int def, Error **errp);
+
+#endif
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 1aafbf5f34..370b3ebee9 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -95,6 +95,7 @@ struct TranslationBlock;
* @get_phys_page_debug: Callback for obtaining a physical address.
* @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register.
+ * @debug_excp_handler: Callback for handling debug exceptions.
* @vmsd: State description for migration.
* @gdb_num_core_regs: Number of core registers accessible to GDB.
* @gdb_core_xml_file: File name for core registers GDB XML description.
@@ -134,6 +135,7 @@ typedef struct CPUClass {
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
+ void (*debug_excp_handler)(CPUState *cpu);
int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
int cpuid, void *opaque);
@@ -169,7 +171,8 @@ typedef struct CPUBreakpoint {
typedef struct CPUWatchpoint {
vaddr vaddr;
- vaddr len_mask;
+ vaddr len;
+ vaddr hitaddr;
int flags; /* BP_* */
QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
@@ -622,9 +625,12 @@ void cpu_single_step(CPUState *cpu, int enabled);
#define BP_MEM_WRITE 0x02
#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
-#define BP_WATCHPOINT_HIT 0x08
+/* 0x08 currently unused */
#define BP_GDB 0x10
#define BP_CPU 0x20
+#define BP_WATCHPOINT_HIT_READ 0x40
+#define BP_WATCHPOINT_HIT_WRITE 0x80
+#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint);
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 174ea36afa..d2000af9c3 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -303,6 +303,8 @@ bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
int kvm_check_extension(KVMState *s, unsigned int extension);
+int kvm_vm_check_extension(KVMState *s, unsigned int extension);
+
#define kvm_vm_enable_cap(s, capability, cap_flags, ...) \
({ \
struct kvm_enable_cap cap = { \
diff --git a/include/ui/console.h b/include/ui/console.h
index 845526ed01..cde0faf6e5 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -102,8 +102,7 @@ struct QemuConsoleClass {
ObjectClass parent_class;
};
-#define QEMU_BIG_ENDIAN_FLAG 0x01
-#define QEMU_ALLOCATED_FLAG 0x02
+#define QEMU_ALLOCATED_FLAG 0x01
struct PixelFormat {
uint8_t bits_per_pixel;
@@ -119,8 +118,6 @@ struct DisplaySurface {
pixman_format_code_t format;
pixman_image_t *image;
uint8_t flags;
-
- struct PixelFormat pf;
};
typedef struct QemuUIInfo {
@@ -188,9 +185,13 @@ struct DisplayChangeListener {
};
DisplayState *init_displaystate(void);
-DisplaySurface* qemu_create_displaysurface_from(int width, int height, int bpp,
- int linesize, uint8_t *data,
- bool byteswap);
+DisplaySurface *qemu_create_displaysurface_from(int width, int height,
+ pixman_format_code_t format,
+ int linesize, uint8_t *data);
+DisplaySurface *qemu_create_displaysurface_guestmem(int width, int height,
+ pixman_format_code_t format,
+ int linesize,
+ uint64_t addr);
PixelFormat qemu_different_endianness_pixelformat(int bpp);
PixelFormat qemu_default_pixelformat(int bpp);
@@ -199,10 +200,12 @@ void qemu_free_displaysurface(DisplaySurface *surface);
static inline int is_surface_bgr(DisplaySurface *surface)
{
- if (surface->pf.bits_per_pixel == 32 && surface->pf.rshift == 0)
+ if (PIXMAN_FORMAT_BPP(surface->format) == 32 &&
+ PIXMAN_FORMAT_TYPE(surface->format) == PIXMAN_TYPE_ABGR) {
return 1;
- else
+ } else {
return 0;
+ }
}
static inline int is_buffer_shared(DisplaySurface *surface)
@@ -228,6 +231,10 @@ void dpy_text_resize(QemuConsole *con, int w, int h);
void dpy_mouse_set(QemuConsole *con, int x, int y, int on);
void dpy_cursor_define(QemuConsole *con, QEMUCursor *cursor);
bool dpy_cursor_define_supported(QemuConsole *con);
+void dpy_gfx_update_dirty(QemuConsole *con,
+ MemoryRegion *address_space,
+ uint64_t base,
+ bool invalidate);
static inline int surface_stride(DisplaySurface *s)
{
diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h
index ba970f813b..381969d97b 100644
--- a/include/ui/qemu-pixman.h
+++ b/include/ui/qemu-pixman.h
@@ -33,6 +33,8 @@
/* -------------------------------------------------------------------- */
+PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format);
+pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian);
int qemu_pixman_get_type(int rshift, int gshift, int bshift);
pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf);
@@ -40,6 +42,8 @@ pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format,
int width);
void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
int width, int x, int y);
+void qemu_pixman_linebuf_copy(pixman_image_t *fb, int width, int x, int y,
+ pixman_image_t *linebuf);
pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format,
pixman_image_t *image);
void qemu_pixman_image_unref(pixman_image_t *image);
diff --git a/kvm-all.c b/kvm-all.c
index f5edcb13d1..8b9e66d42d 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -493,6 +493,19 @@ int kvm_check_extension(KVMState *s, unsigned int extension)
return ret;
}
+int kvm_vm_check_extension(KVMState *s, unsigned int extension)
+{
+ int ret;
+
+ ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
+ if (ret < 0) {
+ /* VM wide version not implemented, use global one instead */
+ ret = kvm_check_extension(s, extension);
+ }
+
+ return ret;
+}
+
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
bool assign, uint32_t size, bool datamatch)
{
diff --git a/linux-user/main.c b/linux-user/main.c
index 472a16d2db..483eb3fec2 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3458,8 +3458,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
}
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- cpu_watchpoint_insert(new_cpu, wp->vaddr, (~wp->len_mask) + 1,
- wp->flags, NULL);
+ cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
}
#endif
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 26929c59de..e11b20887e 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -4325,15 +4325,7 @@ badframe:
return 0;
}
-#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
-
-/* FIXME: Many of the structures are defined for both PPC and PPC64, but
- the signal handling is different enough that we haven't implemented
- support for PPC64 yet. Hence the restriction above.
-
- There are various #if'd blocks for code for TARGET_PPC64. These
- blocks should go away so that we can successfully run 32-bit and
- 64-bit binaries on a QEMU configured for PPC64. */
+#elif defined(TARGET_PPC)
/* Size of dummy stack frame allocated when calling signal handler.
See arch/powerpc/include/asm/ptrace.h. */
@@ -4343,6 +4335,33 @@ badframe:
#define SIGNAL_FRAMESIZE 64
#endif
+/* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
+ on 64-bit PPC, sigcontext and mcontext are one and the same. */
+struct target_mcontext {
+ target_ulong mc_gregs[48];
+ /* Includes fpscr. */
+ uint64_t mc_fregs[33];
+ target_ulong mc_pad[2];
+ /* We need to handle Altivec and SPE at the same time, which no
+ kernel needs to do. Fortunately, the kernel defines this bit to
+ be Altivec-register-large all the time, rather than trying to
+ twiddle it based on the specific platform. */
+ union {
+ /* SPE vector registers. One extra for SPEFSCR. */
+ uint32_t spe[33];
+ /* Altivec vector registers. The packing of VSCR and VRSAVE
+ varies depending on whether we're PPC64 or not: PPC64 splits
+ them apart; PPC32 stuffs them together. */
+#if defined(TARGET_PPC64)
+#define QEMU_NVRREG 34
+#else
+#define QEMU_NVRREG 33
+#endif
+ ppc_avr_t altivec[QEMU_NVRREG];
+#undef QEMU_NVRREG
+ } mc_vregs __attribute__((__aligned__(16)));
+};
+
/* See arch/powerpc/include/asm/sigcontext.h. */
struct target_sigcontext {
target_ulong _unused[4];
@@ -4353,7 +4372,9 @@ struct target_sigcontext {
target_ulong handler;
target_ulong oldmask;
target_ulong regs; /* struct pt_regs __user * */
- /* TODO: PPC64 includes extra bits here. */
+#if defined(TARGET_PPC64)
+ struct target_mcontext mcontext;
+#endif
};
/* Indices for target_mcontext.mc_gregs, below.
@@ -4408,32 +4429,6 @@ enum {
TARGET_PT_REGS_COUNT = 44
};
-/* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
- on 64-bit PPC, sigcontext and mcontext are one and the same. */
-struct target_mcontext {
- target_ulong mc_gregs[48];
- /* Includes fpscr. */
- uint64_t mc_fregs[33];
- target_ulong mc_pad[2];
- /* We need to handle Altivec and SPE at the same time, which no
- kernel needs to do. Fortunately, the kernel defines this bit to
- be Altivec-register-large all the time, rather than trying to
- twiddle it based on the specific platform. */
- union {
- /* SPE vector registers. One extra for SPEFSCR. */
- uint32_t spe[33];
- /* Altivec vector registers. The packing of VSCR and VRSAVE
- varies depending on whether we're PPC64 or not: PPC64 splits
- them apart; PPC32 stuffs them together. */
-#if defined(TARGET_PPC64)
-#define QEMU_NVRREG 34
-#else
-#define QEMU_NVRREG 33
-#endif
- ppc_avr_t altivec[QEMU_NVRREG];
-#undef QEMU_NVRREG
- } mc_vregs __attribute__((__aligned__(16)));
-};
struct target_ucontext {
target_ulong tuc_flags;
@@ -4447,7 +4442,7 @@ struct target_ucontext {
target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
- struct target_sigcontext tuc_mcontext;
+ struct target_sigcontext tuc_sigcontext;
#else
int32_t tuc_maskext[30];
int32_t tuc_pad2[3];
@@ -4462,12 +4457,41 @@ struct target_sigframe {
int32_t abigap[56];
};
+#if defined(TARGET_PPC64)
+
+#define TARGET_TRAMP_SIZE 6
+
+struct target_rt_sigframe {
+ /* sys_rt_sigreturn requires the ucontext be the first field */
+ struct target_ucontext uc;
+ target_ulong _unused[2];
+ uint32_t trampoline[TARGET_TRAMP_SIZE];
+ target_ulong pinfo; /* struct siginfo __user * */
+ target_ulong puc; /* void __user * */
+ struct target_siginfo info;
+ /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
+ char abigap[288];
+} __attribute__((aligned(16)));
+
+#else
+
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
int32_t abigap[56];
};
+#endif
+
+#if defined(TARGET_PPC64)
+
+struct target_func_ptr {
+ target_ulong entry;
+ target_ulong toc;
+};
+
+#endif
+
/* We use the mc_pad field for the signal return trampoline. */
#define tramp mc_pad
@@ -4491,8 +4515,7 @@ static target_ulong get_sigframe(struct target_sigaction *ka,
return newsp;
}
-static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame,
- int sigret)
+static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
target_ulong msr = env->msr;
int i;
@@ -4559,11 +4582,14 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame,
/* Store MSR. */
__put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
+}
+static void encode_trampoline(int sigret, uint32_t *tramp)
+{
/* Set up the sigreturn trampoline: li r0,sigret; sc. */
if (sigret) {
- __put_user(0x38000000UL | sigret, &frame->tramp[0]);
- __put_user(0x44000002UL, &frame->tramp[1]);
+ __put_user(0x38000000 | sigret, &tramp[0]);
+ __put_user(0x44000002, &tramp[1]);
}
}
@@ -4655,6 +4681,9 @@ static void setup_frame(int sig, struct target_sigaction *ka,
target_ulong frame_addr, newsp;
int err = 0;
int signal;
+#if defined(TARGET_PPC64)
+ struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
+#endif
frame_addr = get_sigframe(ka, env, sizeof(*frame));
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
@@ -4665,7 +4694,7 @@ static void setup_frame(int sig, struct target_sigaction *ka,
__put_user(ka->_sa_handler, &sc->handler);
__put_user(set->sig[0], &sc->oldmask);
-#if defined(TARGET_PPC64)
+#if TARGET_ABI_BITS == 64
__put_user(set->sig[0] >> 32, &sc->_unused[3]);
#else
__put_user(set->sig[1], &sc->_unused[3]);
@@ -4674,7 +4703,10 @@ static void setup_frame(int sig, struct target_sigaction *ka,
__put_user(sig, &sc->signal);
/* Save user regs. */
- save_user_regs(env, &frame->mctx, TARGET_NR_sigreturn);
+ save_user_regs(env, &frame->mctx);
+
+ /* Construct the trampoline code on the stack. */
+ encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
/* The kernel checks for the presence of a VDSO here. We don't
emulate a vdso, so use a sigreturn system call. */
@@ -4694,7 +4726,24 @@ static void setup_frame(int sig, struct target_sigaction *ka,
env->gpr[1] = newsp;
env->gpr[3] = signal;
env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
+
+#if defined(TARGET_PPC64)
+ if (get_ppc64_abi(image) < 2) {
+ /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
+ struct target_func_ptr *handler =
+ (struct target_func_ptr *)g2h(ka->_sa_handler);
+ env->nip = tswapl(handler->entry);
+ env->gpr[2] = tswapl(handler->toc);
+ } else {
+ /* ELFv2 PPC64 function pointers are entry points, but R12
+ * must also be set */
+ env->nip = tswapl((target_ulong) ka->_sa_handler);
+ env->gpr[12] = env->nip;
+ }
+#else
env->nip = (target_ulong) ka->_sa_handler;
+#endif
+
/* Signal handlers are entered in big-endian mode. */
env->msr &= ~MSR_LE;
@@ -4712,10 +4761,14 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUPPCState *env)
{
struct target_rt_sigframe *rt_sf;
- struct target_mcontext *frame;
+ uint32_t *trampptr = 0;
+ struct target_mcontext *mctx = 0;
target_ulong rt_sf_addr, newsp = 0;
int i, err = 0;
int signal;
+#if defined(TARGET_PPC64)
+ struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
+#endif
rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
@@ -4733,25 +4786,35 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
&rt_sf->uc.tuc_stack.ss_flags);
__put_user(target_sigaltstack_used.ss_size,
&rt_sf->uc.tuc_stack.ss_size);
+#if !defined(TARGET_PPC64)
__put_user(h2g (&rt_sf->uc.tuc_mcontext),
&rt_sf->uc.tuc_regs);
+#endif
for(i = 0; i < TARGET_NSIG_WORDS; i++) {
__put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
}
- frame = &rt_sf->uc.tuc_mcontext;
- save_user_regs(env, frame, TARGET_NR_rt_sigreturn);
+#if defined(TARGET_PPC64)
+ mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
+ trampptr = &rt_sf->trampoline[0];
+#else
+ mctx = &rt_sf->uc.tuc_mcontext;
+ trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
+#endif
+
+ save_user_regs(env, mctx);
+ encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
/* The kernel checks for the presence of a VDSO here. We don't
emulate a vdso, so use a sigreturn system call. */
- env->lr = (target_ulong) h2g(frame->tramp);
+ env->lr = (target_ulong) h2g(trampptr);
/* Turn off all fp exceptions. */
env->fpscr = 0;
/* Create a stack frame for the caller of the handler. */
newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
- __put_user(env->gpr[1], (target_ulong *)(uintptr_t) newsp);
+ err |= put_user(env->gpr[1], newsp, target_ulong);
if (err)
goto sigsegv;
@@ -4762,7 +4825,24 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
env->gpr[6] = (target_ulong) h2g(rt_sf);
+
+#if defined(TARGET_PPC64)
+ if (get_ppc64_abi(image) < 2) {
+ /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
+ struct target_func_ptr *handler =
+ (struct target_func_ptr *)g2h(ka->_sa_handler);
+ env->nip = tswapl(handler->entry);
+ env->gpr[2] = tswapl(handler->toc);
+ } else {
+ /* ELFv2 PPC64 function pointers are entry points, but R12
+ * must also be set */
+ env->nip = tswapl((target_ulong) ka->_sa_handler);
+ env->gpr[12] = env->nip;
+ }
+#else
env->nip = (target_ulong) ka->_sa_handler;
+#endif
+
/* Signal handlers are entered in big-endian mode. */
env->msr &= ~MSR_LE;
@@ -4789,7 +4869,7 @@ long do_sigreturn(CPUPPCState *env)
goto sigsegv;
#if defined(TARGET_PPC64)
- set.sig[0] = sc->oldmask + ((long)(sc->_unused[3]) << 32);
+ set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
#else
__get_user(set.sig[0], &sc->oldmask);
__get_user(set.sig[1], &sc->_unused[3]);
@@ -4827,10 +4907,11 @@ static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
return 1;
#if defined(TARGET_PPC64)
- fprintf (stderr, "do_setcontext: not implemented\n");
- return 0;
+ mcp_addr = h2g(ucp) +
+ offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
__get_user(mcp_addr, &ucp->tuc_regs);
+#endif
if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
return 1;
@@ -4841,7 +4922,6 @@ static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
unlock_user_struct(mcp, mcp_addr, 1);
return 0;
-#endif
}
long do_rt_sigreturn(CPUPPCState *env)
diff --git a/qapi/Makefile.objs b/qapi/Makefile.objs
index d14b769cff..2278970690 100644
--- a/qapi/Makefile.objs
+++ b/qapi/Makefile.objs
@@ -1,6 +1,6 @@
util-obj-y = qapi-visit-core.o qapi-dealloc-visitor.o qmp-input-visitor.o
util-obj-y += qmp-output-visitor.o qmp-registry.o qmp-dispatch.o
util-obj-y += string-input-visitor.o string-output-visitor.o
-
util-obj-y += opts-visitor.o
util-obj-y += qmp-event.o
+util-obj-y += qapi-util.o
diff --git a/qapi/qapi-util.c b/qapi/qapi-util.c
new file mode 100644
index 0000000000..1d8fb96eff
--- /dev/null
+++ b/qapi/qapi-util.c
@@ -0,0 +1,34 @@
+/*
+ * QAPI util functions
+ *
+ * Authors:
+ * Hu Tao <hutao@cn.fujitsu.com>
+ * Peter Lieven <pl@kamp.de>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include "qemu-common.h"
+#include "qapi/error.h"
+#include "qapi/util.h"
+
+int qapi_enum_parse(const char *lookup[], const char *buf,
+ int max, int def, Error **errp)
+{
+ int i;
+
+ if (!buf) {
+ return def;
+ }
+
+ for (i = 0; i < max; i++) {
+ if (!strcmp(buf, lookup[i])) {
+ return i;
+ }
+ }
+
+ error_setg(errp, "invalid parameter value: %s", buf);
+ return def;
+}
diff --git a/qemu-img.c b/qemu-img.c
index ff29ed1c67..91d1ac3d7d 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -95,7 +95,8 @@ static void QEMU_NORETURN help(void)
" 'cache' is the cache mode used to write the output disk image, the valid\n"
" options are: 'none', 'writeback' (default, except for convert), 'writethrough',\n"
" 'directsync' and 'unsafe' (default for convert)\n"
- " 'src_cache' in contrast is the cache mode used to read input disk images\n"
+ " 'src_cache' is the cache mode used to read input disk images, the valid\n"
+ " options are the same as for the 'cache' option\n"
" 'size' is the disk image size in bytes. Optional suffixes\n"
" 'k' or 'K' (kilobyte, 1024), 'M' (megabyte, 1024k), 'G' (gigabyte, 1024M),\n"
" 'T' (terabyte, 1024G), 'P' (petabyte, 1024T) and 'E' (exabyte, 1024P) are\n"
diff --git a/qemu-img.texi b/qemu-img.texi
index cb689483b6..cc4668e64f 100644
--- a/qemu-img.texi
+++ b/qemu-img.texi
@@ -73,8 +73,9 @@ specifies the cache mode that should be used with the (destination) file. See
the documentation of the emulator's @code{-drive cache=...} option for allowed
values.
@item -T @var{src_cache}
-in contrast specifies the cache mode that should be used with the source
-file(s).
+specifies the cache mode that should be used with the source file(s). See
+the documentation of the emulator's @code{-drive cache=...} option for allowed
+values.
@end table
Parameters to snapshot subcommand:
@@ -340,7 +341,7 @@ string), then the image is rebased onto no backing file (i.e. it will exist
independently of any backing file).
@var{cache} specifies the cache mode to be used for @var{filename}, whereas
-@var{src_cache} specifies the cache mode for reading the new backing file.
+@var{src_cache} specifies the cache mode for reading backing files.
There are two different modes in which @code{rebase} can operate:
@table @option
diff --git a/qemu-nbd.c b/qemu-nbd.c
index 626e5844f9..9bc152e6c3 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -18,11 +18,13 @@
#include "qemu-common.h"
#include "block/block.h"
+#include "block/block_int.h"
#include "block/nbd.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "block/snapshot.h"
+#include "qapi/util.h"
#include <stdarg.h>
#include <stdio.h>
@@ -37,10 +39,11 @@
#include <libgen.h>
#include <pthread.h>
-#define SOCKET_PATH "/var/lock/qemu-nbd-%s"
-#define QEMU_NBD_OPT_CACHE 1
-#define QEMU_NBD_OPT_AIO 2
-#define QEMU_NBD_OPT_DISCARD 3
+#define SOCKET_PATH "/var/lock/qemu-nbd-%s"
+#define QEMU_NBD_OPT_CACHE 1
+#define QEMU_NBD_OPT_AIO 2
+#define QEMU_NBD_OPT_DISCARD 3
+#define QEMU_NBD_OPT_DETECT_ZEROES 4
static NBDExport *exp;
static int verbose;
@@ -57,45 +60,47 @@ static void usage(const char *name)
"Usage: %s [OPTIONS] FILE\n"
"QEMU Disk Network Block Device Server\n"
"\n"
-" -h, --help display this help and exit\n"
-" -V, --version output version information and exit\n"
+" -h, --help display this help and exit\n"
+" -V, --version output version information and exit\n"
"\n"
"Connection properties:\n"
-" -p, --port=PORT port to listen on (default `%d')\n"
-" -b, --bind=IFACE interface to bind to (default `0.0.0.0')\n"
-" -k, --socket=PATH path to the unix socket\n"
-" (default '"SOCKET_PATH"')\n"
-" -e, --shared=NUM device can be shared by NUM clients (default '1')\n"
-" -t, --persistent don't exit on the last connection\n"
-" -v, --verbose display extra debugging information\n"
+" -p, --port=PORT port to listen on (default `%d')\n"
+" -b, --bind=IFACE interface to bind to (default `0.0.0.0')\n"
+" -k, --socket=PATH path to the unix socket\n"
+" (default '"SOCKET_PATH"')\n"
+" -e, --shared=NUM device can be shared by NUM clients (default '1')\n"
+" -t, --persistent don't exit on the last connection\n"
+" -v, --verbose display extra debugging information\n"
"\n"
"Exposing part of the image:\n"
-" -o, --offset=OFFSET offset into the image\n"
-" -P, --partition=NUM only expose partition NUM\n"
+" -o, --offset=OFFSET offset into the image\n"
+" -P, --partition=NUM only expose partition NUM\n"
"\n"
#ifdef __linux__
"Kernel NBD client support:\n"
-" -c, --connect=DEV connect FILE to the local NBD device DEV\n"
-" -d, --disconnect disconnect the specified device\n"
+" -c, --connect=DEV connect FILE to the local NBD device DEV\n"
+" -d, --disconnect disconnect the specified device\n"
"\n"
#endif
"\n"
"Block device options:\n"
-" -f, --format=FORMAT set image format (raw, qcow2, ...)\n"
-" -r, --read-only export read-only\n"
-" -s, --snapshot use FILE as an external snapshot, create a temporary\n"
-" file with backing_file=FILE, redirect the write to\n"
-" the temporary one\n"
+" -f, --format=FORMAT set image format (raw, qcow2, ...)\n"
+" -r, --read-only export read-only\n"
+" -s, --snapshot use FILE as an external snapshot, create a temporary\n"
+" file with backing_file=FILE, redirect the write to\n"
+" the temporary one\n"
" -l, --load-snapshot=SNAPSHOT_PARAM\n"
-" load an internal snapshot inside FILE and export it\n"
-" as an read-only device, SNAPSHOT_PARAM format is\n"
-" 'snapshot.id=[ID],snapshot.name=[NAME]', or\n"
-" '[ID_OR_NAME]'\n"
-" -n, --nocache disable host cache\n"
-" --cache=MODE set cache mode (none, writeback, ...)\n"
+" load an internal snapshot inside FILE and export it\n"
+" as an read-only device, SNAPSHOT_PARAM format is\n"
+" 'snapshot.id=[ID],snapshot.name=[NAME]', or\n"
+" '[ID_OR_NAME]'\n"
+" -n, --nocache disable host cache\n"
+" --cache=MODE set cache mode (none, writeback, ...)\n"
#ifdef CONFIG_LINUX_AIO
-" --aio=MODE set AIO mode (native or threads)\n"
+" --aio=MODE set AIO mode (native or threads)\n"
#endif
+" --discard=MODE set discard mode (ignore, unmap)\n"
+" --detect-zeroes=MODE set detect-zeroes mode (off, on, discard)\n"
"\n"
"Report bugs to <qemu-devel@nongnu.org>\n"
, name, NBD_DEFAULT_PORT, "DEVICE");
@@ -410,6 +415,7 @@ int main(int argc, char **argv)
{ "aio", 1, NULL, QEMU_NBD_OPT_AIO },
#endif
{ "discard", 1, NULL, QEMU_NBD_OPT_DISCARD },
+ { "detect-zeroes", 1, NULL, QEMU_NBD_OPT_DETECT_ZEROES },
{ "shared", 1, NULL, 'e' },
{ "format", 1, NULL, 'f' },
{ "persistent", 0, NULL, 't' },
@@ -432,6 +438,7 @@ int main(int argc, char **argv)
pthread_t client_thread;
const char *fmt = NULL;
Error *local_err = NULL;
+ BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
/* The client thread uses SIGTERM to interrupt the server. A signal
* handler ensures that "qemu-nbd -v -c" exits with a nice status code.
@@ -483,6 +490,23 @@ int main(int argc, char **argv)
errx(EXIT_FAILURE, "Invalid discard mode `%s'", optarg);
}
break;
+ case QEMU_NBD_OPT_DETECT_ZEROES:
+ detect_zeroes =
+ qapi_enum_parse(BlockdevDetectZeroesOptions_lookup,
+ optarg,
+ BLOCKDEV_DETECT_ZEROES_OPTIONS_MAX,
+ BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
+ &local_err);
+ if (local_err) {
+ errx(EXIT_FAILURE, "Failed to parse detect_zeroes mode: %s",
+ error_get_pretty(local_err));
+ }
+ if (detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
+ !(flags & BDRV_O_UNMAP)) {
+ errx(EXIT_FAILURE, "setting detect-zeroes to unmap is not allowed "
+ "without setting discard operation to unmap");
+ }
+ break;
case 'b':
bindto = optarg;
break;
@@ -522,15 +546,18 @@ int main(int argc, char **argv)
break;
case 'P':
partition = strtol(optarg, &end, 0);
- if (*end)
+ if (*end) {
errx(EXIT_FAILURE, "Invalid partition `%s'", optarg);
- if (partition < 1 || partition > 8)
+ }
+ if (partition < 1 || partition > 8) {
errx(EXIT_FAILURE, "Invalid partition %d", partition);
+ }
break;
case 'k':
sockpath = optarg;
- if (sockpath[0] != '/')
+ if (sockpath[0] != '/') {
errx(EXIT_FAILURE, "socket path must be absolute\n");
+ }
break;
case 'd':
disconnect = true;
@@ -550,9 +577,9 @@ int main(int argc, char **argv)
case 'f':
fmt = optarg;
break;
- case 't':
- persistent = 1;
- break;
+ case 't':
+ persistent = 1;
+ break;
case 'v':
verbose = 1;
break;
@@ -587,7 +614,7 @@ int main(int argc, char **argv)
printf("%s disconnected\n", argv[optind]);
- return 0;
+ return 0;
}
if (device && !verbose) {
@@ -686,6 +713,7 @@ int main(int argc, char **argv)
error_get_pretty(local_err));
}
+ bs->detect_zeroes = detect_zeroes;
fd_size = bdrv_getlength(bs);
if (partition != -1) {
diff --git a/qom/cpu.c b/qom/cpu.c
index b32dd0a562..ba8b402617 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -202,6 +202,10 @@ static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
return target_words_bigendian();
}
+static void cpu_common_debug_excp_handler(CPUState *cpu)
+{
+}
+
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
@@ -340,6 +344,7 @@ static void cpu_class_init(ObjectClass *klass, void *data)
k->gdb_read_register = cpu_common_gdb_read_register;
k->gdb_write_register = cpu_common_gdb_write_register;
k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
+ k->debug_excp_handler = cpu_common_debug_excp_handler;
dc->realize = cpu_common_realizefn;
/*
* Reason: CPUs still need special care by board code: wiring up
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index 8199f32e32..7ea12bda1c 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -129,26 +129,38 @@ static void arm_cpu_reset(CPUState *s)
env->uncached_cpsr = ARM_CPU_MODE_SVC;
env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
/* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
- clear at reset. Initial SP and PC are loaded from ROM. */
+ * clear at reset. Initial SP and PC are loaded from ROM.
+ */
if (IS_M(env)) {
- uint32_t pc;
+ uint32_t initial_msp; /* Loaded from 0x0 */
+ uint32_t initial_pc; /* Loaded from 0x4 */
uint8_t *rom;
+
env->daif &= ~PSTATE_I;
rom = rom_ptr(0);
if (rom) {
- /* We should really use ldl_phys here, in case the guest
- modified flash and reset itself. However images
- loaded via -kernel have not been copied yet, so load the
- values directly from there. */
- env->regs[13] = ldl_p(rom) & 0xFFFFFFFC;
- pc = ldl_p(rom + 4);
- env->thumb = pc & 1;
- env->regs[15] = pc & ~1;
+ /* Address zero is covered by ROM which hasn't yet been
+ * copied into physical memory.
+ */
+ initial_msp = ldl_p(rom);
+ initial_pc = ldl_p(rom + 4);
+ } else {
+ /* Address zero not covered by a ROM blob, or the ROM blob
+ * is in non-modifiable memory and this is a second reset after
+ * it got copied into memory. In the latter case, rom_ptr
+ * will return a NULL pointer and we should use ldl_phys instead.
+ */
+ initial_msp = ldl_phys(s->as, 0);
+ initial_pc = ldl_phys(s->as, 4);
}
+
+ env->regs[13] = initial_msp & 0xFFFFFFFC;
+ env->regs[15] = initial_pc & ~1;
+ env->thumb = initial_pc & 1;
}
if (env->cp15.c1_sys & SCTLR_V) {
- env->regs[15] = 0xFFFF0000;
+ env->regs[15] = 0xFFFF0000;
}
env->vfp.xregs[ARM_VFP_FPEXC] = 0;
@@ -172,6 +184,8 @@ static void arm_cpu_reset(CPUState *s)
kvm_arm_reset_vcpu(cpu);
}
#endif
+
+ hw_watchpoint_update_all(cpu);
}
#ifndef CONFIG_USER_ONLY
@@ -1051,6 +1065,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#endif
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "arm-core.xml";
+ cc->debug_excp_handler = arm_debug_excp_handler;
}
static void cpu_register(const ARMCPUInfo *info)
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 51bedc8262..d1e1ccb605 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -323,6 +323,8 @@ typedef struct CPUARMState {
int eabi;
#endif
+ struct CPUWatchpoint *cpu_watchpoint[16];
+
CPU_COMMON
/* These fields after the common ones so they are preserved on reset. */
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 2b95f33872..ece967397f 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -304,17 +304,6 @@ void init_cpreg_list(ARMCPU *cpu)
g_list_free(keys);
}
-/* Return true if extended addresses are enabled.
- * This is always the case if our translation regime is 64 bit,
- * but depends on TTBCR.EAE for 32 bit.
- */
-static inline bool extended_addresses_enabled(CPUARMState *env)
-{
- return arm_el_is_aa64(env, 1)
- || ((arm_feature(env, ARM_FEATURE_LPAE)
- && (env->cp15.c2_control & TTBCR_EAE)));
-}
-
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -388,6 +377,47 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
+/* IS variants of TLB operations must affect all cores */
+static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, 1);
+ }
+}
+
+static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, value == 0);
+ }
+}
+
+static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
+ }
+}
+
+static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
+ }
+}
+
static const ARMCPRegInfo cp_reginfo[] = {
{ .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
@@ -414,21 +444,6 @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = {
*/
{ .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
- /* MMU TLB control. Note that the wildcarding means we cover not just
- * the unified TLB ops but also the dside/iside/inner-shareable variants.
- */
- { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
- .type = ARM_CP_NO_MIGRATE },
- { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
- .type = ARM_CP_NO_MIGRATE },
- { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
- .type = ARM_CP_NO_MIGRATE },
- { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
- .type = ARM_CP_NO_MIGRATE },
/* Cache maintenance ops; some of this space may be overridden later. */
{ .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
.opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
@@ -472,6 +487,21 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
*/
{ .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* MMU TLB control. Note that the wildcarding means we cover not just
+ * the unified TLB ops but also the dside/iside/inner-shareable variants.
+ */
+ { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
+ .type = ARM_CP_NO_MIGRATE },
+ { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
+ .type = ARM_CP_NO_MIGRATE },
+ { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
+ .type = ARM_CP_NO_MIGRATE },
+ { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
+ .type = ARM_CP_NO_MIGRATE },
REGINFO_SENTINEL
};
@@ -890,6 +920,44 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
{ .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
.type = ARM_CP_NO_MIGRATE, .access = PL1_R, .readfn = isr_read },
+ /* 32 bit ITLB invalidates */
+ { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit DTLB invalidates */
+ { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit TLB invalidates */
+ { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v7mp_cp_reginfo[] = {
+ /* 32 bit TLB invalidates, Inner Shareable */
+ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_is_write },
+ { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
+ { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .writefn = tlbiasid_is_write },
+ { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .writefn = tlbimvaa_is_write },
REGINFO_SENTINEL
};
@@ -1879,6 +1947,39 @@ static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush(CPU(cpu), asid == 0);
}
+static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, pageaddr);
+ }
+}
+
+static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, pageaddr);
+ }
+}
+
+static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ int asid = extract64(value, 48, 16);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, asid == 0);
+ }
+}
+
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* We don't implement EL2, so the only control on DC ZVA is the
@@ -1996,27 +2097,27 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbiall_write },
+ .writefn = tlbiall_is_write },
{ .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_asid_write },
+ .writefn = tlbi_aa64_asid_is_write },
{ .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
@@ -2056,42 +2157,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
#endif
- /* 32 bit TLB invalidates, Inner Shareable */
- { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ /* TLB invalidate last level of translation table walk */
{ .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
{ .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
- /* 32 bit ITLB invalidates */
- { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- /* 32 bit DTLB invalidates */
- { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- /* 32 bit TLB invalidates */
- { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .writefn = tlbimvaa_is_write },
{ .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
.type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
@@ -2255,18 +2326,35 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
- /* Dummy implementation of monitor debug system control register:
- * we don't support debug. (The 32-bit alias is DBGDSCRext.)
- */
+ /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
{ .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
.resetvalue = 0 },
+ /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
+ * We don't implement the configurable EL0 access.
+ */
+ { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_R,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
+ .resetfn = arm_cp_reset_ignore },
/* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
{ .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
.access = PL1_W, .type = ARM_CP_NOP },
+ /* Dummy OSDLR_EL1: 32-bit Linux will read this */
+ { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
+ .access = PL1_RW, .type = ARM_CP_NOP },
+ /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
+ * implement vector catch debug events yet.
+ */
+ { .name = "DBGVCR",
+ .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NOP },
REGINFO_SENTINEL
};
@@ -2279,20 +2367,149 @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
REGINFO_SENTINEL
};
+void hw_watchpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ vaddr len = 0;
+ vaddr wvr = env->cp15.dbgwvr[n];
+ uint64_t wcr = env->cp15.dbgwcr[n];
+ int mask;
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+
+ if (env->cpu_watchpoint[n]) {
+ cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
+ env->cpu_watchpoint[n] = NULL;
+ }
+
+ if (!extract64(wcr, 0, 1)) {
+ /* E bit clear : watchpoint disabled */
+ return;
+ }
+
+ switch (extract64(wcr, 3, 2)) {
+ case 0:
+ /* LSC 00 is reserved and must behave as if the wp is disabled */
+ return;
+ case 1:
+ flags |= BP_MEM_READ;
+ break;
+ case 2:
+ flags |= BP_MEM_WRITE;
+ break;
+ case 3:
+ flags |= BP_MEM_ACCESS;
+ break;
+ }
+
+ /* Attempts to use both MASK and BAS fields simultaneously are
+ * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
+ * thus generating a watchpoint for every byte in the masked region.
+ */
+ mask = extract64(wcr, 24, 4);
+ if (mask == 1 || mask == 2) {
+ /* Reserved values of MASK; we must act as if the mask value was
+ * some non-reserved value, or as if the watchpoint were disabled.
+ * We choose the latter.
+ */
+ return;
+ } else if (mask) {
+ /* Watchpoint covers an aligned area up to 2GB in size */
+ len = 1ULL << mask;
+ /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
+ * whether the watchpoint fires when the unmasked bits match; we opt
+ * to generate the exceptions.
+ */
+ wvr &= ~(len - 1);
+ } else {
+ /* Watchpoint covers bytes defined by the byte address select bits */
+ int bas = extract64(wcr, 5, 8);
+ int basstart;
+
+ if (bas == 0) {
+ /* This must act as if the watchpoint is disabled */
+ return;
+ }
+
+ if (extract64(wvr, 2, 1)) {
+            /* Deprecated case of an address that is only word (4-byte)
+             * aligned: BAS[7:4] are ignored, and BAS[3:0] define which
+             * bytes to watch.
+             */
+ bas &= 0xf;
+ }
+ /* The BAS bits are supposed to be programmed to indicate a contiguous
+ * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
+ * we fire for each byte in the word/doubleword addressed by the WVR.
+ * We choose to ignore any non-zero bits after the first range of 1s.
+ */
+ basstart = ctz32(bas);
+ len = cto32(bas >> basstart);
+ wvr += basstart;
+ }
+
+ cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
+ &env->cpu_watchpoint[n]);
+}
+
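The BAS handling in hw_watchpoint_update() turns the byte-address-select bitmask into a contiguous (address, length) pair. A minimal standalone sketch of the same arithmetic, assuming ctz32()/cto32() count trailing zeros and trailing ones as QEMU's helpers do (re-implemented here with GCC builtins for illustration only):

/* Illustrative sketch: DBGWCR.BAS = 0x0c with WVR = 0x1000 watches two
 * bytes starting at 0x1002.
 */
#include <stdint.h>
#include <stdio.h>

static int my_ctz32(uint32_t v) { return v ? __builtin_ctz(v) : 32; }
static int my_cto32(uint32_t v) { return my_ctz32(~v); }

int main(void)
{
    uint64_t wvr = 0x1000;
    int bas = 0x0c;                          /* watch bytes 2 and 3 */
    int basstart = my_ctz32(bas);            /* 2 */
    int len = my_cto32(bas >> basstart);     /* 2 */

    printf("watch %d bytes at 0x%llx\n", len,
           (unsigned long long)(wvr + basstart));   /* 2 bytes at 0x1002 */
    return 0;
}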
+void hw_watchpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /* Completely clear out existing QEMU watchpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
+ hw_watchpoint_update(cpu, i);
+ }
+}
+
+static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
+ * register reads and behaves as if values written are sign extended.
+ * Bits [1:0] are RES0.
+ */
+ value = sextract64(value, 0, 49) & ~3ULL;
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
+static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
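dbgwvr_write() above relies on sextract64() to mirror bit [48] into bits [63:49] and on the mask to clear the two RES0 bits. A self-contained sketch of that transformation, with a local re-implementation of sextract64 used for illustration only:

/* Illustrative sketch of: value = sextract64(value, 0, 49) & ~3ULL; */
#include <stdint.h>
#include <assert.h>

static int64_t sext64(uint64_t v, int start, int len)
{
    return ((int64_t)(v << (64 - len - start))) >> (64 - len);
}

int main(void)
{
    uint64_t written = 0x0001ffffffff1003ULL;    /* bit 48 set, bits [1:0] set */
    uint64_t stored  = (uint64_t)sext64(written, 0, 49) & ~3ULL;

    assert(stored == 0xffffffffffff1000ULL);     /* sign-extended, RES0 cleared */
    return 0;
}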
static void define_debug_regs(ARMCPU *cpu)
{
/* Define v7 and v8 architectural debug registers.
* These are just dummy implementations for now.
*/
int i;
- int wrps, brps;
+ int wrps, brps, ctx_cmps;
ARMCPRegInfo dbgdidr = {
.name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
};
+ /* Note that all these register fields hold "number of Xs minus 1". */
brps = extract32(cpu->dbgdidr, 24, 4);
wrps = extract32(cpu->dbgdidr, 28, 4);
+ ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+
+ assert(ctx_cmps <= brps);
/* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
* of the debug registers such as number of breakpoints;
@@ -2301,6 +2518,7 @@ static void define_debug_regs(ARMCPU *cpu)
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
+ assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
}
define_one_arm_cp_reg(cpu, &dbgdidr);
@@ -2330,12 +2548,16 @@ static void define_debug_regs(ARMCPU *cpu)
{ .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
+ .writefn = dbgwvr_write, .raw_writefn = raw_write
+ },
{ .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
- REGINFO_SENTINEL
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
+ .writefn = dbgwcr_write, .raw_writefn = raw_write
+ },
+ REGINFO_SENTINEL
};
define_arm_cp_regs(cpu, dbgregs);
}
@@ -2434,6 +2656,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V6K)) {
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
+ if (arm_feature(env, ARM_FEATURE_V7MP)) {
+ define_arm_cp_regs(cpu, v7mp_cp_reginfo);
+ }
if (arm_feature(env, ARM_FEATURE_V7)) {
/* v7 performance monitor control register: same implementor
* field as main ID register, and we implement only the cycle
@@ -3506,11 +3731,37 @@ void arm_cpu_do_interrupt(CPUState *cs)
uint32_t mask;
int new_mode;
uint32_t offset;
+ uint32_t moe;
assert(!IS_M(env));
arm_log_exception(cs->exception_index);
+ /* If this is a debug exception we must update the DBGDSCR.MOE bits */
+ switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
+ case EC_BREAKPOINT:
+ case EC_BREAKPOINT_SAME_EL:
+ moe = 1;
+ break;
+ case EC_WATCHPOINT:
+ case EC_WATCHPOINT_SAME_EL:
+ moe = 10;
+ break;
+ case EC_AA32_BKPT:
+ moe = 3;
+ break;
+ case EC_VECTORCATCH:
+ moe = 5;
+ break;
+ default:
+ moe = 0;
+ break;
+ }
+
+ if (moe) {
+ env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
+ }
+
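The MOE update above uses deposit64() to overwrite only bits [5:2] of MDSCR_EL1 and leave every other bit intact. A small sketch of the bit arithmetic, with a local stand-in for deposit64() and an assumed starting value (bit 15, the MDE enable checked elsewhere in this series, already set):

/* Illustrative sketch only; dep64() mimics deposit64(dst, start, len, val). */
#include <stdint.h>
#include <assert.h>

static uint64_t dep64(uint64_t dst, int start, int len, uint64_t val)
{
    uint64_t mask = (~0ULL >> (64 - len)) << start;
    return (dst & ~mask) | ((val << start) & mask);
}

int main(void)
{
    uint64_t mdscr = 0x8000;             /* assumed: MDE set, MOE == 0 */
    mdscr = dep64(mdscr, 2, 4, 10);      /* synchronous watchpoint: MOE = 0b1010 */

    assert(mdscr == 0x8028);             /* 0b1010 << 2 == 0x28; MDE untouched */
    return 0;
}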
/* TODO: Vectored interrupt controller. */
switch (cs->exception_index) {
case EXCP_UDEF:
diff --git a/target-arm/internals.h b/target-arm/internals.h
index 53c2e3cf3e..64751a0798 100644
--- a/target-arm/internals.h
+++ b/target-arm/internals.h
@@ -142,6 +142,17 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
aarch64_restore_sp(env, cur_el);
}
+/* Return true if extended addresses are enabled.
+ * This is always the case if our translation regime is 64 bit,
+ * but depends on TTBCR.EAE for 32 bit.
+ */
+static inline bool extended_addresses_enabled(CPUARMState *env)
+{
+ return arm_el_is_aa64(env, 1)
+ || ((arm_feature(env, ARM_FEATURE_LPAE)
+ && (env->cp15.c2_control & TTBCR_EAE)));
+}
+
/* Valid Syndrome Register EC field values */
enum arm_exception_class {
EC_UNCATEGORIZED = 0x00,
@@ -296,4 +307,23 @@ static inline uint32_t syn_swstep(int same_el, int isv, int ex)
| (isv << 24) | (ex << 6) | 0x22;
}
+static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
+{
+ return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (cm << 8) | (wnr << 6) | 0x22;
+}
+
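syn_watchpoint() relies on the convention that the same-EL exception class is the base class plus one, so ORing same_el into the low bit of the EC field selects the right encoding. A sketch of that identity; the EC values and shift are assumptions taken from target-arm/internals.h:

/* Illustrative sketch only. */
#include <stdint.h>
#include <assert.h>

#define ARM_EL_EC_SHIFT        26
#define EC_WATCHPOINT          0x34
#define EC_WATCHPOINT_SAME_EL  0x35

int main(void)
{
    uint32_t syn = ((uint32_t)EC_WATCHPOINT << ARM_EL_EC_SHIFT)
                 | (1u << ARM_EL_EC_SHIFT);          /* same_el == 1 */

    assert((syn >> ARM_EL_EC_SHIFT) == EC_WATCHPOINT_SAME_EL);
    return 0;
}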
+/* Update a QEMU watchpoint based on the information the guest has set in the
+ * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
+ */
+void hw_watchpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU watchpoints for every guest watchpoint. This does a
+ * complete delete-and-reinstate of the QEMU watchpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_watchpoint_update_all(ARMCPU *cpu);
+
+/* Callback function for when a watchpoint or breakpoint triggers. */
+void arm_debug_excp_handler(CPUState *cs);
+
#endif
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 3bcc7cc833..8dfe87cb6b 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -2,6 +2,7 @@
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
+#include "internals.h"
static bool vfp_needed(void *opaque)
{
@@ -213,6 +214,8 @@ static int cpu_post_load(void *opaque, int version_id)
}
}
+ hw_watchpoint_update_all(cpu);
+
return 0;
}
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index fe40358c96..b956216c4b 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -456,6 +456,194 @@ illegal_return:
}
}
+/* Return true if the linked breakpoint entry lbn passes its checks */
+static bool linked_bp_matches(ARMCPU *cpu, int lbn)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bcr = env->cp15.dbgbcr[lbn];
+ int brps = extract32(cpu->dbgdidr, 24, 4);
+ int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+ int bt;
+ uint32_t contextidr;
+
+ /* Links to unimplemented or non-context aware breakpoints are
+ * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
+ * as if linked to an UNKNOWN context-aware breakpoint (in which
+ * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
+ * We choose the former.
+ */
+ if (lbn > brps || lbn < (brps - ctx_cmps)) {
+ return false;
+ }
+
+ bcr = env->cp15.dbgbcr[lbn];
+
+ if (extract64(bcr, 0, 1) == 0) {
+ /* Linked breakpoint disabled : generate no events */
+ return false;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ /* We match the whole register even if this is AArch32 using the
+ * short descriptor format (in which case it holds both PROCID and ASID),
+ * since we don't implement the optional v7 context ID masking.
+ */
+ contextidr = extract64(env->cp15.contextidr_el1, 0, 32);
+
+ switch (bt) {
+ case 3: /* linked context ID match */
+ if (arm_current_pl(env) > 1) {
+ /* Context matches never fire in EL2 or (AArch64) EL3 */
+ return false;
+ }
+ return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
+ case 5: /* linked address mismatch (reserved in AArch64) */
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ default:
+ /* Links to Unlinked context breakpoints must generate no
+ * events; we choose to do the same for reserved values too.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+static bool wp_matches(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t wcr = env->cp15.dbgwcr[n];
+ int pac, hmc, ssc, wt, lbn;
+ /* TODO: check against CPU security state when we implement TrustZone */
+ bool is_secure = false;
+
+ if (!env->cpu_watchpoint[n]
+ || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
+ return false;
+ }
+
+ /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+ * enabled and that the address and access type match; check the
+ * remaining fields, including linked breakpoints.
+     * Note that some combinations of {PAC, HMC, SSC} are reserved and
+ * must act either like some valid combination or as if the watchpoint
+ * were disabled. We choose the former, and use this together with
+ * the fact that EL3 must always be Secure and EL2 must always be
+ * Non-Secure to simplify the code slightly compared to the full
+ * table in the ARM ARM.
+ */
+ pac = extract64(wcr, 1, 2);
+ hmc = extract64(wcr, 13, 1);
+ ssc = extract64(wcr, 14, 2);
+
+ switch (ssc) {
+ case 0:
+ break;
+ case 1:
+ case 3:
+ if (is_secure) {
+ return false;
+ }
+ break;
+ case 2:
+ if (!is_secure) {
+ return false;
+ }
+ break;
+ }
+
+ /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT
+ * "unprivileged access" instructions should match watchpoints as if
+ * they were accesses done at EL0, even if the CPU is at EL1 or higher.
+ * Implementing this would require reworking the core watchpoint code
+ * to plumb the mmu_idx through to this point. Luckily Linux does not
+ * rely on this behaviour currently.
+ */
+ switch (arm_current_pl(env)) {
+ case 3:
+ case 2:
+ if (!hmc) {
+ return false;
+ }
+ break;
+ case 1:
+ if (extract32(pac, 0, 1) == 0) {
+ return false;
+ }
+ break;
+ case 0:
+ if (extract32(pac, 1, 1) == 0) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ wt = extract64(wcr, 20, 1);
+ lbn = extract64(wcr, 16, 4);
+
+ if (wt && !linked_bp_matches(cpu, lbn)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_watchpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /* If watchpoints are disabled globally or we can't take debug
+ * exceptions here then watchpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+ if (wp_matches(cpu, n)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void arm_debug_excp_handler(CPUState *cs)
+{
+ /* Called by core code when a watchpoint or breakpoint fires;
+ * need to check which one and raise the appropriate exception.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+ if (wp_hit) {
+ if (wp_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
+ if (check_watchpoints(cpu)) {
+ bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+ bool same_el = arm_debug_target_el(env) == arm_current_pl(env);
+
+ env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
+ if (extended_addresses_enabled(env)) {
+ env->exception.fsr = (1 << 9) | 0x22;
+ } else {
+ env->exception.fsr = 0x2;
+ }
+ env->exception.vaddress = wp_hit->hitaddr;
+ raise_exception(env, EXCP_DATA_ABORT);
+ } else {
+ cpu_resume_from_signal(cs, NULL);
+ }
+ }
+ }
+}
+
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
The only way to do that in TCG is a conditional branch, which clobbers
all our temporaries. For now implement these as helper functions. */
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 88b64d8b66..90d0a05eb1 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -2843,9 +2843,6 @@ static void x86_cpu_initfn(Object *obj)
if (tcg_enabled() && !inited) {
inited = 1;
optimize_flags_init();
-#ifndef CONFIG_USER_ONLY
- cpu_set_debug_excp_handler(breakpoint_handler);
-#endif
}
}
@@ -2942,6 +2939,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_x86_cpu;
#endif
cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
+#ifndef CONFIG_USER_ONLY
+ cc->debug_excp_handler = breakpoint_handler;
+#endif
}
static const TypeInfo x86_cpu_type_info = {
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 3460b12139..71b505f56c 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1121,7 +1121,7 @@ static inline int hw_breakpoint_len(unsigned long dr7, int index)
void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
-void breakpoint_handler(CPUX86State *env);
+void breakpoint_handler(CPUState *cs);
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 30cb0d0143..28fefe0a1f 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1011,9 +1011,10 @@ bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
return hit_enabled;
}
-void breakpoint_handler(CPUX86State *env)
+void breakpoint_handler(CPUState *cs)
{
- CPUState *cs = CPU(x86_env_get_cpu(env));
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
CPUBreakpoint *bp;
if (cs->watchpoint_hit) {
diff --git a/target-lm32/cpu.c b/target-lm32/cpu.c
index c5c20d74c4..419d664845 100644
--- a/target-lm32/cpu.c
+++ b/target-lm32/cpu.c
@@ -158,7 +158,6 @@ static void lm32_cpu_initfn(Object *obj)
if (tcg_enabled() && !tcg_initialized) {
tcg_initialized = true;
lm32_translate_init();
- cpu_set_debug_excp_handler(lm32_debug_excp_handler);
}
}
@@ -273,6 +272,7 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_lm32_cpu;
#endif
cc->gdb_num_core_regs = 32 + 7;
+ cc->debug_excp_handler = lm32_debug_excp_handler;
}
static void lm32_register_cpu_type(const LM32CPUInfo *info)
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index 70600aa47a..0dab6e89ab 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -211,7 +211,7 @@ void lm32_cpu_list(FILE *f, fprintf_function cpu_fprintf);
void lm32_translate_init(void);
void cpu_lm32_set_phys_msb_ignore(CPULM32State *env, int value);
void QEMU_NORETURN raise_exception(CPULM32State *env, int index);
-void lm32_debug_excp_handler(CPULM32State *env);
+void lm32_debug_excp_handler(CPUState *cs);
void lm32_breakpoint_insert(CPULM32State *env, int index, target_ulong address);
void lm32_breakpoint_remove(CPULM32State *env, int index);
void lm32_watchpoint_insert(CPULM32State *env, int index, target_ulong address,
diff --git a/target-lm32/helper.c b/target-lm32/helper.c
index 1bca1961af..ad724aecbc 100644
--- a/target-lm32/helper.c
+++ b/target-lm32/helper.c
@@ -125,9 +125,10 @@ static bool check_watchpoints(CPULM32State *env)
return false;
}
-void lm32_debug_excp_handler(CPULM32State *env)
+void lm32_debug_excp_handler(CPUState *cs)
{
- CPUState *cs = CPU(lm32_env_get_cpu(env));
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
CPUBreakpoint *bp;
if (cs->watchpoint_hit) {
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 509eae52ff..0cfdc8ab8f 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -28,7 +28,6 @@ DEF_HELPER_2(icbi, void, env, tl)
DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32)
#if defined(TARGET_PPC64)
-DEF_HELPER_3(mulldo, i64, env, i64, i64)
DEF_HELPER_4(divdeu, i64, env, i64, i64, i32)
DEF_HELPER_4(divde, i64, env, i64, i64, i32)
#endif
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index f6e8846707..713d777076 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -24,23 +24,6 @@
#include "helper_regs.h"
/*****************************************************************************/
/* Fixed point operations helpers */
-#if defined(TARGET_PPC64)
-
-uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
-{
- int64_t th;
- uint64_t tl;
-
- muls64(&tl, (uint64_t *)&th, arg1, arg2);
- /* If th != 0 && th != -1, then we had an overflow */
- if (likely((uint64_t)(th + 1) <= 1)) {
- env->ov = 0;
- } else {
- env->so = env->ov = 1;
- }
- return (int64_t)tl;
-}
-#endif
target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
uint32_t oe)
@@ -238,7 +221,7 @@ target_ulong helper_srad(CPUPPCState *env, target_ulong value,
if (likely((uint64_t)shift != 0)) {
shift &= 0x3f;
ret = (int64_t)value >> shift;
- if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
+ if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
env->ca = 0;
} else {
env->ca = 1;
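The 1ULL change above matters because shift can be as large as 63: with a plain int constant the shift exceeds the width of int (undefined behaviour) and the mask can never cover more than the low 32 bits. A small sketch of the corrected mask computation:

/* Illustrative sketch: with shift = 40, (1 << shift) is undefined, while
 * (1ULL << shift) - 1 yields the intended 40-bit mask of shifted-out bits.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int shift = 40;
    uint64_t mask = (1ULL << shift) - 1;     /* 0x000000ffffffffff */

    printf("mask = 0x%016llx\n", (unsigned long long)mask);
    return 0;
}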
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 42718f77ae..9c23c6ba0d 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -38,6 +38,7 @@
#include "hw/ppc/ppc.h"
#include "sysemu/watchdog.h"
#include "trace.h"
+#include "exec/gdbstub.h"
//#define DEBUG_KVM
@@ -72,6 +73,8 @@ static int cap_papr;
static int cap_htab_fd;
static int cap_fixup_hcalls;
+static uint32_t debug_inst_opcode;
+
/* XXX We have a race condition where we actually have a level triggered
* interrupt, but the infrastructure can't expose that yet, so the guest
* takes but ignores it, goes to sleep and never gets notified that there's
@@ -410,6 +413,38 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
}
+/* e500 supports 2 h/w breakpoints and 2 watchpoints.
+ * book3s supports only 1 watchpoint, so an array size
+ * of 4 is sufficient for now.
+ */
+#define MAX_HW_BKPTS 4
+
+static struct HWBreakpoint {
+ target_ulong addr;
+ int type;
+} hw_debug_points[MAX_HW_BKPTS];
+
+static CPUWatchpoint hw_watchpoint;
+
+/* By default no hardware breakpoints or watchpoints are supported */
+static int max_hw_breakpoint;
+static int max_hw_watchpoint;
+static int nb_hw_breakpoint;
+static int nb_hw_watchpoint;
+
+static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
+{
+ if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
+ max_hw_breakpoint = 2;
+ max_hw_watchpoint = 2;
+ }
+
+ if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
+ fprintf(stderr, "Error initializing h/w breakpoints\n");
+ return;
+ }
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -436,6 +471,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
break;
}
+ kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
+ kvmppc_hw_debug_points_init(cenv);
+
return ret;
}
@@ -899,6 +937,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
+{
+ env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
+}
+
int kvm_arch_get_registers(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -981,35 +1024,57 @@ int kvm_arch_get_registers(CPUState *cs)
if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
+ kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
+ kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
+ kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
+ kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
+ kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
+ kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
+ kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
+ kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
+ kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
+ kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
+ kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
+ kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
+ kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
+ kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
+ kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
+ kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
if (sregs.u.e.features & KVM_SREGS_E_SPE) {
env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
+ kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
}
if (sregs.u.e.features & KVM_SREGS_E_PM) {
env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
+ kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
}
if (sregs.u.e.features & KVM_SREGS_E_PC) {
env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
}
}
@@ -1244,6 +1309,259 @@ static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t dat
return 0;
}
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ /* Mixed endian case is not handled */
+ uint32_t sc = debug_inst_opcode;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sizeof(sc), 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint32_t sc;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
+ sc != debug_inst_opcode ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sizeof(sc), 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int find_hw_breakpoint(target_ulong addr, int type)
+{
+ int n;
+
+ assert((nb_hw_breakpoint + nb_hw_watchpoint)
+ <= ARRAY_SIZE(hw_debug_points));
+
+ for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+ if (hw_debug_points[n].addr == addr &&
+ hw_debug_points[n].type == type) {
+ return n;
+ }
+ }
+
+ return -1;
+}
+
+static int find_hw_watchpoint(target_ulong addr, int *flag)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
+ if (n >= 0) {
+ *flag = BP_MEM_ACCESS;
+ return n;
+ }
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
+ if (n >= 0) {
+ *flag = BP_MEM_WRITE;
+ return n;
+ }
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
+ if (n >= 0) {
+ *flag = BP_MEM_READ;
+ return n;
+ }
+
+ return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
+ return -ENOBUFS;
+ }
+
+ hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
+ hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
+
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ if (nb_hw_breakpoint >= max_hw_breakpoint) {
+ return -ENOBUFS;
+ }
+
+ if (find_hw_breakpoint(addr, type) >= 0) {
+ return -EEXIST;
+ }
+
+ nb_hw_breakpoint++;
+ break;
+
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ if (nb_hw_watchpoint >= max_hw_watchpoint) {
+ return -ENOBUFS;
+ }
+
+ if (find_hw_breakpoint(addr, type) >= 0) {
+ return -EEXIST;
+ }
+
+ nb_hw_watchpoint++;
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, type);
+ if (n < 0) {
+ return -ENOENT;
+ }
+
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ nb_hw_breakpoint--;
+ break;
+
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ nb_hw_watchpoint--;
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+ hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoint = nb_hw_watchpoint = 0;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+ int n;
+
+ /* Software Breakpoint updates */
+ if (kvm_sw_breakpoints_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+
+ assert((nb_hw_breakpoint + nb_hw_watchpoint)
+ <= ARRAY_SIZE(hw_debug_points));
+ assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
+
+ if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
+ for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+ switch (hw_debug_points[n].type) {
+ case GDB_BREAKPOINT_HW:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
+ break;
+ case GDB_WATCHPOINT_READ:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
+ break;
+ case GDB_WATCHPOINT_ACCESS:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
+ KVMPPC_DEBUG_WATCH_READ;
+ break;
+ default:
+ cpu_abort(cs, "Unsupported breakpoint type\n");
+ }
+ dbg->arch.bp[n].addr = hw_debug_points[n].addr;
+ }
+ }
+}
+
+static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+ int handle = 0;
+ int n;
+ int flag = 0;
+
+ if (cs->singlestep_enabled) {
+ handle = 1;
+ } else if (arch_info->status) {
+ if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+ if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
+ n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
+ if (n >= 0) {
+ handle = 1;
+ }
+ } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
+ KVMPPC_DEBUG_WATCH_WRITE)) {
+ n = find_hw_watchpoint(arch_info->address, &flag);
+ if (n >= 0) {
+ handle = 1;
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_debug_points[n].addr;
+ hw_watchpoint.flags = flag;
+ }
+ }
+ }
+ } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
+ handle = 1;
+ } else {
+        /* QEMU cannot handle this debug exception, so inject a program
+         * exception into the guest -- a program exception, NOT a debug
+         * exception.
+         * When QEMU uses the debug resources it needs the debug
+         * exception to be permanently enabled, so it sets MSR_DE and
+         * also MSRP_DEP so the guest cannot change MSR_DE.
+         * When emulating debug resources for the guest, the guest must
+         * instead control MSR_DE (enabling or disabling the debug
+         * interrupt as needed). Supporting both configurations at once
+         * is not possible, so debug resources cannot be shared between
+         * QEMU and the guest on the BookE architecture. In the current
+         * design QEMU has priority over the guest: if QEMU is using the
+         * debug resources then the guest cannot use them.
+         * Software breakpoints use a privileged instruction, so the
+         * only way we can get here for a guest-owned debug exception is
+         * if the guest executed a privileged or illegal instruction --
+         * hence the program interrupt injected here.
+         */
+
+ cpu_synchronize_state(cs);
+ /* env->nip is PC, so increment this by 4 to use
+         * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
+ */
+ env->nip += 4;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_INVAL;
+ ppc_cpu_do_interrupt(cs);
+ }
+
+ return handle;
+}
+
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -1284,6 +1602,16 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
ret = 0;
break;
+ case KVM_EXIT_DEBUG:
+ DPRINTF("handle debug exception\n");
+ if (kvm_handle_debug(cpu, run)) {
+ ret = EXCP_DEBUG;
+ break;
+ }
+ /* re-enter, this exception was guest-internal */
+ ret = 0;
+ break;
+
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
@@ -1369,7 +1697,7 @@ static int read_cpuinfo(const char *field, char *value, int len)
}
do {
- if(!fgets(line, sizeof(line), f)) {
+ if (!fgets(line, sizeof(line), f)) {
break;
}
if (!strncmp(line, field, field_len)) {
@@ -1404,6 +1732,17 @@ uint32_t kvmppc_get_tbfreq(void)
return retval;
}
+bool kvmppc_get_host_serial(char **value)
+{
+ return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
+ NULL);
+}
+
+bool kvmppc_get_host_model(char **value)
+{
+ return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
+}
+
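A caller might consume the two new helpers as in the sketch below. This is illustrative only and not part of the patch; ownership of the returned buffer follows the g_file_get_contents() convention (caller frees):

/* Illustrative sketch only. */
#include <glib.h>
#include <stdio.h>
#include "kvm_ppc.h"

static void example_print_host_info(void)
{
    char *buf = NULL;

    if (kvmppc_get_host_model(&buf)) {
        printf("host model: %s\n", buf);
        g_free(buf);
    }
    if (kvmppc_get_host_serial(&buf)) {
        printf("host serial: %s\n", buf);
        g_free(buf);
    }
}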
/* Try to find a device tree node for a CPU with clock-frequency property */
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
{
@@ -1496,7 +1835,7 @@ static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
PowerPCCPU *cpu = ppc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+ if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
!kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
return 0;
}
@@ -1965,34 +2304,6 @@ void kvm_arch_init_irq_routing(KVMState *s)
{
}
-int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
-{
- return -EINVAL;
-}
-
-int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
-{
- return -EINVAL;
-}
-
-int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type)
-{
- return -EINVAL;
-}
-
-int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type)
-{
- return -EINVAL;
-}
-
-void kvm_arch_remove_all_hw_breakpoints(void)
-{
-}
-
-void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
-{
-}
-
struct kvm_get_htab_buf {
struct kvm_get_htab_header header;
/*
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index d9516e73ef..2e0224c6af 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -19,6 +19,8 @@ uint32_t kvmppc_get_tbfreq(void);
uint64_t kvmppc_get_clockfreq(void);
uint32_t kvmppc_get_vmx(void);
uint32_t kvmppc_get_dfp(void);
+bool kvmppc_get_host_model(char **buf);
+bool kvmppc_get_host_serial(char **buf);
int kvmppc_get_hasidle(CPUPPCState *env);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
@@ -60,6 +62,16 @@ static inline uint32_t kvmppc_get_tbfreq(void)
return 0;
}
+static inline bool kvmppc_get_host_model(char **buf)
+{
+ return false;
+}
+
+static inline bool kvmppc_get_host_serial(char **buf)
+{
+ return false;
+}
+
static inline uint64_t kvmppc_get_clockfreq(void)
{
return 0;
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index c07bb01a7a..d03daeaa48 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -1128,9 +1128,19 @@ static void gen_mulhwu(DisasContext *ctx)
/* mullw mullw. */
static void gen_mullw(DisasContext *ctx)
{
- tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
+#if defined(TARGET_PPC64)
+ TCGv_i64 t0, t1;
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#else
+ tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+#endif
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
@@ -1144,7 +1154,11 @@ static void gen_mullwo(DisasContext *ctx)
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
tcg_gen_muls2_i32(t0, t1, t0, t1);
- tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#if defined(TARGET_PPC64)
+ tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+#else
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
tcg_gen_sari_i32(t0, t0, 31);
tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
@@ -1201,8 +1215,20 @@ static void gen_mulld(DisasContext *ctx)
/* mulldo mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
- gen_helper_mulldo(cpu_gpr[rD(ctx->opcode)], cpu_env,
- cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);
+
+ tcg_gen_sari_i64(t0, t0, 63);
+ tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
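The inline expansion of mulldo above computes the full 128-bit signed product and flags overflow when the high half is not the sign-extension of the low half. A host-side sketch of the same test, using __int128 purely for illustration:

/* Illustrative sketch: mirrors muls2 + sari(63) + setcond(NE) above. */
#include <stdint.h>
#include <assert.h>

static int mulldo_ov(int64_t a, int64_t b, int64_t *lo)
{
    __int128 p = (__int128)a * b;
    int64_t low = (int64_t)p;
    int64_t high = (int64_t)(p >> 64);

    *lo = low;
    return high != (low >> 63);       /* overflow iff high != sign of low */
}

int main(void)
{
    int64_t lo;

    assert(mulldo_ov(3, 4, &lo) == 0 && lo == 12);   /* fits: no overflow */
    assert(mulldo_ov(INT64_MAX, 2, &lo) == 1);       /* overflows */
    return 0;
}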
@@ -1616,18 +1642,17 @@ static void gen_rlwimi(DisasContext *ctx)
mb = MB(ctx->opcode);
me = ME(ctx->opcode);
sh = SH(ctx->opcode);
- if (likely(sh == 0 && mb == 0 && me == 31)) {
- tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (likely(sh == (31-me) && mb <= me)) {
+ tcg_gen_deposit_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], sh, me - mb + 1);
} else {
target_ulong mask;
TCGv t1;
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- TCGv_i32 t2 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_rotli_i32(t2, t2, sh);
- tcg_gen_extu_i32_i64(t0, t2);
- tcg_temp_free_i32(t2);
+ tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], 32, 32);
+ tcg_gen_rotli_i64(t0, t0, sh);
#else
tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
@@ -1672,14 +1697,18 @@ static void gen_rlwinm(DisasContext *ctx)
tcg_gen_shri_tl(t0, t0, mb);
tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
tcg_temp_free(t0);
+ } else if (likely(mb == 0 && me == 31)) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
} else {
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- TCGv_i32 t1 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_rotli_i32(t1, t1, sh);
- tcg_gen_extu_i32_i64(t0, t1);
- tcg_temp_free_i32(t1);
+ tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], 32, 32);
+ tcg_gen_rotli_i64(t0, t0, sh);
#else
tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
#endif
@@ -1698,37 +1727,49 @@ static void gen_rlwinm(DisasContext *ctx)
static void gen_rlwnm(DisasContext *ctx)
{
uint32_t mb, me;
- TCGv t0;
+ mb = MB(ctx->opcode);
+ me = ME(ctx->opcode);
+
+ if (likely(mb == 0 && me == 31)) {
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_andi_i32(t0, t0, 0x1f);
+ tcg_gen_rotl_i32(t1, t1, t0);
+ tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ } else {
+ TCGv t0;
#if defined(TARGET_PPC64)
- TCGv_i32 t1, t2;
+ TCGv t1;
#endif
- mb = MB(ctx->opcode);
- me = ME(ctx->opcode);
- t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
#if defined(TARGET_PPC64)
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_trunc_i64_i32(t2, t0);
- tcg_gen_rotl_i32(t1, t1, t2);
- tcg_gen_extu_i32_i64(t0, t1);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
+ t1 = tcg_temp_new_i64();
+ tcg_gen_deposit_i64(t1, cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], 32, 32);
+ tcg_gen_rotl_i64(t0, t1, t0);
+ tcg_temp_free_i64(t1);
#else
- tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
#endif
- if (unlikely(mb != 0 || me != 31)) {
+ if (unlikely(mb != 0 || me != 31)) {
#if defined(TARGET_PPC64)
- mb += 32;
- me += 32;
+ mb += 32;
+ me += 32;
#endif
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
- } else {
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+ } else {
+ tcg_gen_andi_tl(t0, t0, MASK(32, 63));
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ }
+ tcg_temp_free(t0);
}
- tcg_temp_free(t0);
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -1924,7 +1965,7 @@ static void gen_srawi(DisasContext *ctx)
TCGv dst = cpu_gpr[rA(ctx->opcode)];
TCGv src = cpu_gpr[rS(ctx->opcode)];
if (sh == 0) {
- tcg_gen_mov_tl(dst, src);
+ tcg_gen_ext32s_tl(dst, src);
tcg_gen_movi_tl(cpu_ca, 0);
} else {
TCGv t0;
diff --git a/target-xtensa/cpu.c b/target-xtensa/cpu.c
index 9d8801b70e..936d526d41 100644
--- a/target-xtensa/cpu.c
+++ b/target-xtensa/cpu.c
@@ -119,7 +119,6 @@ static void xtensa_cpu_initfn(Object *obj)
if (tcg_enabled() && !tcg_inited) {
tcg_inited = true;
xtensa_translate_init();
- cpu_set_debug_excp_handler(xtensa_breakpoint_handler);
}
}
@@ -151,6 +150,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
#endif
+ cc->debug_excp_handler = xtensa_breakpoint_handler;
dc->vmsd = &vmstate_xtensa_cpu;
}
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index d797d2649a..9cf52758c7 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -390,7 +390,7 @@ static inline CPUXtensaState *cpu_init(const char *cpu_model)
}
void xtensa_translate_init(void);
-void xtensa_breakpoint_handler(CPUXtensaState *env);
+void xtensa_breakpoint_handler(CPUState *cs);
int cpu_xtensa_exec(CPUXtensaState *s);
void xtensa_register_core(XtensaConfigList *node);
void check_interrupts(CPUXtensaState *s);
diff --git a/target-xtensa/helper.c b/target-xtensa/helper.c
index 94dcd9442e..6671e40289 100644
--- a/target-xtensa/helper.c
+++ b/target-xtensa/helper.c
@@ -79,9 +79,10 @@ static uint32_t check_hw_breakpoints(CPUXtensaState *env)
return 0;
}
-void xtensa_breakpoint_handler(CPUXtensaState *env)
+void xtensa_breakpoint_handler(CPUState *cs)
{
- CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
if (cs->watchpoint_hit) {
if (cs->watchpoint_hit->flags & BP_CPU) {
diff --git a/tests/Makefile b/tests/Makefile
index 469c0a5e44..d5db97ba63 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -299,6 +299,7 @@ libqos-obj-y += tests/libqos/i2c.o
libqos-pc-obj-y = $(libqos-obj-y) tests/libqos/pci-pc.o
libqos-pc-obj-y += tests/libqos/malloc-pc.o
libqos-omap-obj-y = $(libqos-obj-y) tests/libqos/i2c-omap.o
+libqos-virtio-obj-y = $(libqos-obj-y) $(libqos-pc-obj-y) tests/libqos/virtio.o tests/libqos/virtio-pci.o
tests/rtc-test$(EXESUF): tests/rtc-test.o
tests/m48t59-test$(EXESUF): tests/m48t59-test.o
@@ -320,7 +321,7 @@ tests/vmxnet3-test$(EXESUF): tests/vmxnet3-test.o
tests/ne2000-test$(EXESUF): tests/ne2000-test.o
tests/wdt_ib700-test$(EXESUF): tests/wdt_ib700-test.o
tests/virtio-balloon-test$(EXESUF): tests/virtio-balloon-test.o
-tests/virtio-blk-test$(EXESUF): tests/virtio-blk-test.o
+tests/virtio-blk-test$(EXESUF): tests/virtio-blk-test.o $(libqos-virtio-obj-y)
tests/virtio-net-test$(EXESUF): tests/virtio-net-test.o
tests/virtio-rng-test$(EXESUF): tests/virtio-rng-test.o
tests/virtio-scsi-test$(EXESUF): tests/virtio-scsi-test.o
diff --git a/tests/ide-test.c b/tests/ide-test.c
index ffce6ed669..b7a97e9362 100644
--- a/tests/ide-test.c
+++ b/tests/ide-test.c
@@ -126,6 +126,8 @@ static void ide_test_start(const char *cmdline_fmt, ...)
static void ide_test_quit(void)
{
+ pc_alloc_uninit(guest_malloc);
+ guest_malloc = NULL;
qtest_end();
}
diff --git a/tests/libqos/malloc-pc.c b/tests/libqos/malloc-pc.c
index be1d97f8bb..f4218c6451 100644
--- a/tests/libqos/malloc-pc.c
+++ b/tests/libqos/malloc-pc.c
@@ -17,45 +17,294 @@
#include "hw/nvram/fw_cfg.h"
#include "qemu-common.h"
+#include "qemu/queue.h"
#include <glib.h>
#define PAGE_SIZE (4096)
+#define MLIST_ENTNAME entries
+typedef QTAILQ_HEAD(MemList, MemBlock) MemList;
+typedef struct MemBlock {
+ QTAILQ_ENTRY(MemBlock) MLIST_ENTNAME;
+ uint64_t size;
+ uint64_t addr;
+} MemBlock;
+
typedef struct PCAlloc
{
QGuestAllocator alloc;
-
+ PCAllocOpts opts;
uint64_t start;
uint64_t end;
+
+ MemList used;
+ MemList free;
} PCAlloc;
-static uint64_t pc_alloc(QGuestAllocator *allocator, size_t size)
+static MemBlock *mlist_new(uint64_t addr, uint64_t size)
{
- PCAlloc *s = container_of(allocator, PCAlloc, alloc);
- uint64_t addr;
+ MemBlock *block;
+
+ if (!size) {
+ return NULL;
+ }
+ block = g_malloc0(sizeof(MemBlock));
+ block->addr = addr;
+ block->size = size;
- size += (PAGE_SIZE - 1);
- size &= -PAGE_SIZE;
+ return block;
+}
+
+static void mlist_delete(MemList *list, MemBlock *node)
+{
+ g_assert(list && node);
+ QTAILQ_REMOVE(list, node, MLIST_ENTNAME);
+ g_free(node);
+}
+
+static MemBlock *mlist_find_key(MemList *head, uint64_t addr)
+{
+ MemBlock *node;
+ QTAILQ_FOREACH(node, head, MLIST_ENTNAME) {
+ if (node->addr == addr) {
+ return node;
+ }
+ }
+ return NULL;
+}
+
+static MemBlock *mlist_find_space(MemList *head, uint64_t size)
+{
+ MemBlock *node;
+
+ QTAILQ_FOREACH(node, head, MLIST_ENTNAME) {
+ if (node->size >= size) {
+ return node;
+ }
+ }
+ return NULL;
+}
+
+static MemBlock *mlist_sort_insert(MemList *head, MemBlock *insr)
+{
+ MemBlock *node;
+ g_assert(head && insr);
+
+ QTAILQ_FOREACH(node, head, MLIST_ENTNAME) {
+ if (insr->addr < node->addr) {
+ QTAILQ_INSERT_BEFORE(node, insr, MLIST_ENTNAME);
+ return insr;
+ }
+ }
+
+ QTAILQ_INSERT_TAIL(head, insr, MLIST_ENTNAME);
+ return insr;
+}
+
+static inline uint64_t mlist_boundary(MemBlock *node)
+{
+ return node->size + node->addr;
+}
+
+static MemBlock *mlist_join(MemList *head, MemBlock *left, MemBlock *right)
+{
+ g_assert(head && left && right);
+
+ left->size += right->size;
+ mlist_delete(head, right);
+ return left;
+}
+
+static void mlist_coalesce(MemList *head, MemBlock *node)
+{
+ g_assert(node);
+ MemBlock *left;
+ MemBlock *right;
+ char merge;
+
+ do {
+ merge = 0;
+ left = QTAILQ_PREV(node, MemList, MLIST_ENTNAME);
+ right = QTAILQ_NEXT(node, MLIST_ENTNAME);
+
+ /* clowns to the left of me */
+ if (left && mlist_boundary(left) == node->addr) {
+ node = mlist_join(head, left, node);
+ merge = 1;
+ }
+
+ /* jokers to the right */
+ if (right && mlist_boundary(node) == right->addr) {
+ node = mlist_join(head, node, right);
+ merge = 1;
+ }
+
+ } while (merge);
+}
+
+static uint64_t pc_mlist_fulfill(PCAlloc *s, MemBlock *freenode, uint64_t size)
+{
+ uint64_t addr;
+ MemBlock *usednode;
- g_assert_cmpint((s->start + size), <=, s->end);
+ g_assert(freenode);
+ g_assert_cmpint(freenode->size, >=, size);
- addr = s->start;
- s->start += size;
+ addr = freenode->addr;
+ if (freenode->size == size) {
+ /* re-use this freenode as our used node */
+ QTAILQ_REMOVE(&s->free, freenode, MLIST_ENTNAME);
+ usednode = freenode;
+ } else {
+ /* adjust the free node and create a new used node */
+ freenode->addr += size;
+ freenode->size -= size;
+ usednode = mlist_new(addr, size);
+ }
+ mlist_sort_insert(&s->used, usednode);
return addr;
}
+/* Asserts the internal consistency of the free and used lists.
+ * Used only if PC_ALLOC_PARANOID is set. */
+static void pc_mlist_check(PCAlloc *s)
+{
+ MemBlock *node;
+ uint64_t addr = s->start > 0 ? s->start - 1 : 0;
+ uint64_t next = s->start;
+
+ QTAILQ_FOREACH(node, &s->free, MLIST_ENTNAME) {
+ g_assert_cmpint(node->addr, >, addr);
+ g_assert_cmpint(node->addr, >=, next);
+ addr = node->addr;
+ next = node->addr + node->size;
+ }
+
+ addr = s->start > 0 ? s->start - 1 : 0;
+ next = s->start;
+ QTAILQ_FOREACH(node, &s->used, MLIST_ENTNAME) {
+ g_assert_cmpint(node->addr, >, addr);
+ g_assert_cmpint(node->addr, >=, next);
+ addr = node->addr;
+ next = node->addr + node->size;
+ }
+}
+
+static uint64_t pc_mlist_alloc(PCAlloc *s, uint64_t size)
+{
+ MemBlock *node;
+
+ node = mlist_find_space(&s->free, size);
+ if (!node) {
+ fprintf(stderr, "Out of guest memory.\n");
+ g_assert_not_reached();
+ }
+ return pc_mlist_fulfill(s, node, size);
+}
+
+static void pc_mlist_free(PCAlloc *s, uint64_t addr)
+{
+ MemBlock *node;
+
+ if (addr == 0) {
+ return;
+ }
+
+ node = mlist_find_key(&s->used, addr);
+ if (!node) {
+ fprintf(stderr, "Error: no record found for an allocation at "
+ "0x%016" PRIx64 ".\n",
+ addr);
+ g_assert_not_reached();
+ }
+
+ /* Rip it out of the used list and re-insert back into the free list. */
+ QTAILQ_REMOVE(&s->used, node, MLIST_ENTNAME);
+ mlist_sort_insert(&s->free, node);
+ mlist_coalesce(&s->free, node);
+}
+
+static uint64_t pc_alloc(QGuestAllocator *allocator, size_t size)
+{
+ PCAlloc *s = container_of(allocator, PCAlloc, alloc);
+ uint64_t rsize = size;
+ uint64_t naddr;
+
+ rsize += (PAGE_SIZE - 1);
+ rsize &= -PAGE_SIZE;
+ g_assert_cmpint((s->start + rsize), <=, s->end);
+ g_assert_cmpint(rsize, >=, size);
+
+ naddr = pc_mlist_alloc(s, rsize);
+ if (s->opts & PC_ALLOC_PARANOID) {
+ pc_mlist_check(s);
+ }
+
+ return naddr;
+}
+
static void pc_free(QGuestAllocator *allocator, uint64_t addr)
{
+ PCAlloc *s = container_of(allocator, PCAlloc, alloc);
+
+ pc_mlist_free(s, addr);
+ if (s->opts & PC_ALLOC_PARANOID) {
+ pc_mlist_check(s);
+ }
+}
+
+/*
+ * Mostly for valgrind happiness, but it does offer
+ * a chokepoint for debugging guest memory leaks, too.
+ */
+void pc_alloc_uninit(QGuestAllocator *allocator)
+{
+ PCAlloc *s = container_of(allocator, PCAlloc, alloc);
+ MemBlock *node;
+ MemBlock *tmp;
+ PCAllocOpts mask;
+
+ /* Check for guest leaks, and destroy the list. */
+ QTAILQ_FOREACH_SAFE(node, &s->used, MLIST_ENTNAME, tmp) {
+ if (s->opts & (PC_ALLOC_LEAK_WARN | PC_ALLOC_LEAK_ASSERT)) {
+ fprintf(stderr, "guest malloc leak @ 0x%016" PRIx64 "; "
+ "size 0x%016" PRIx64 ".\n",
+ node->addr, node->size);
+ }
+ if (s->opts & (PC_ALLOC_LEAK_ASSERT)) {
+ g_assert_not_reached();
+ }
+ g_free(node);
+ }
+
+ /* If we have previously asserted that there are no leaks, then there
+ * should be only one node here with a specific address and size. */
+ mask = PC_ALLOC_LEAK_ASSERT | PC_ALLOC_PARANOID;
+ QTAILQ_FOREACH_SAFE(node, &s->free, MLIST_ENTNAME, tmp) {
+ if ((s->opts & mask) == mask) {
+ if ((node->addr != s->start) ||
+ (node->size != s->end - s->start)) {
+ fprintf(stderr, "Free list is corrupted.\n");
+ g_assert_not_reached();
+ }
+ }
+
+ g_free(node);
+ }
+
+ g_free(s);
}
-QGuestAllocator *pc_alloc_init(void)
+QGuestAllocator *pc_alloc_init_flags(PCAllocOpts flags)
{
PCAlloc *s = g_malloc0(sizeof(*s));
uint64_t ram_size;
QFWCFG *fw_cfg = pc_fw_cfg_init();
+ MemBlock *node;
+ s->opts = flags;
s->alloc.alloc = pc_alloc;
s->alloc.free = pc_free;
@@ -70,5 +319,16 @@ QGuestAllocator *pc_alloc_init(void)
/* clean-up */
g_free(fw_cfg);
+ QTAILQ_INIT(&s->used);
+ QTAILQ_INIT(&s->free);
+
+ node = mlist_new(s->start, s->end - s->start);
+ QTAILQ_INSERT_HEAD(&s->free, node, MLIST_ENTNAME);
+
return &s->alloc;
}
+
+inline QGuestAllocator *pc_alloc_init(void)
+{
+ return pc_alloc_init_flags(PC_ALLOC_NO_FLAGS);
+}
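A hypothetical qtest using the new allocator flags might look like the sketch below; it assumes the guest_alloc()/guest_free() wrappers from libqos/malloc.h and is not part of the patch:

/* Illustrative sketch only. */
#include "libqos/malloc-pc.h"

static void example_alloc_roundtrip(void)
{
    QGuestAllocator *alloc;
    uint64_t a, b;

    alloc = pc_alloc_init_flags(PC_ALLOC_PARANOID | PC_ALLOC_LEAK_ASSERT);

    a = guest_alloc(alloc, 4096);     /* allocations are page-rounded */
    b = guest_alloc(alloc, 8192);
    guest_free(alloc, a);             /* freed blocks coalesce back into the free list */
    guest_free(alloc, b);

    pc_alloc_uninit(alloc);           /* asserts here if anything leaked */
}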
diff --git a/tests/libqos/malloc-pc.h b/tests/libqos/malloc-pc.h
index ff964abe53..9f525e3b99 100644
--- a/tests/libqos/malloc-pc.h
+++ b/tests/libqos/malloc-pc.h
@@ -15,6 +15,15 @@
#include "libqos/malloc.h"
+typedef enum {
+ PC_ALLOC_NO_FLAGS = 0x00,
+ PC_ALLOC_LEAK_WARN = 0x01,
+ PC_ALLOC_LEAK_ASSERT = 0x02,
+ PC_ALLOC_PARANOID = 0x04
+} PCAllocOpts;
+
QGuestAllocator *pc_alloc_init(void);
+QGuestAllocator *pc_alloc_init_flags(PCAllocOpts flags);
+void pc_alloc_uninit(QGuestAllocator *allocator);
#endif
diff --git a/tests/libqos/pci.c b/tests/libqos/pci.c
index ce0b308a83..d5ce683d77 100644
--- a/tests/libqos/pci.c
+++ b/tests/libqos/pci.c
@@ -15,8 +15,6 @@
#include "hw/pci/pci_regs.h"
#include <glib.h>
-#include <stdio.h>
-
void qpci_device_foreach(QPCIBus *bus, int vendor_id, int device_id,
void (*func)(QPCIDevice *dev, int devfn, void *data),
void *data)
@@ -75,6 +73,115 @@ void qpci_device_enable(QPCIDevice *dev)
qpci_config_writew(dev, PCI_COMMAND, cmd);
}
+uint8_t qpci_find_capability(QPCIDevice *dev, uint8_t id)
+{
+ uint8_t cap;
+ uint8_t addr = qpci_config_readb(dev, PCI_CAPABILITY_LIST);
+
+ do {
+ cap = qpci_config_readb(dev, addr);
+ if (cap != id) {
+ addr = qpci_config_readb(dev, addr + PCI_CAP_LIST_NEXT);
+ }
+ } while (cap != id && addr != 0);
+
+ return addr;
+}
+
+void qpci_msix_enable(QPCIDevice *dev)
+{
+ uint8_t addr;
+ uint16_t val;
+ uint32_t table;
+ uint8_t bir_table;
+ uint8_t bir_pba;
+ void *offset;
+
+ addr = qpci_find_capability(dev, PCI_CAP_ID_MSIX);
+ g_assert_cmphex(addr, !=, 0);
+
+ val = qpci_config_readw(dev, addr + PCI_MSIX_FLAGS);
+ qpci_config_writew(dev, addr + PCI_MSIX_FLAGS, val | PCI_MSIX_FLAGS_ENABLE);
+
+ table = qpci_config_readl(dev, addr + PCI_MSIX_TABLE);
+ bir_table = table & PCI_MSIX_FLAGS_BIRMASK;
+ offset = qpci_iomap(dev, bir_table, NULL);
+ dev->msix_table = offset + (table & ~PCI_MSIX_FLAGS_BIRMASK);
+
+ table = qpci_config_readl(dev, addr + PCI_MSIX_PBA);
+ bir_pba = table & PCI_MSIX_FLAGS_BIRMASK;
+ if (bir_pba != bir_table) {
+ offset = qpci_iomap(dev, bir_pba, NULL);
+ }
+ dev->msix_pba = offset + (table & ~PCI_MSIX_FLAGS_BIRMASK);
+
+ g_assert(dev->msix_table != NULL);
+ g_assert(dev->msix_pba != NULL);
+ dev->msix_enabled = true;
+}
+
+void qpci_msix_disable(QPCIDevice *dev)
+{
+ uint8_t addr;
+ uint16_t val;
+
+ g_assert(dev->msix_enabled);
+ addr = qpci_find_capability(dev, PCI_CAP_ID_MSIX);
+ g_assert_cmphex(addr, !=, 0);
+ val = qpci_config_readw(dev, addr + PCI_MSIX_FLAGS);
+ qpci_config_writew(dev, addr + PCI_MSIX_FLAGS,
+ val & ~PCI_MSIX_FLAGS_ENABLE);
+
+ qpci_iounmap(dev, dev->msix_table);
+ qpci_iounmap(dev, dev->msix_pba);
+ dev->msix_enabled = 0;
+ dev->msix_table = NULL;
+ dev->msix_pba = NULL;
+}
+
+bool qpci_msix_pending(QPCIDevice *dev, uint16_t entry)
+{
+ uint32_t pba_entry;
+ uint8_t bit_n = entry % 32;
+ void *addr = dev->msix_pba + (entry / 32) * PCI_MSIX_ENTRY_SIZE / 4;
+
+ g_assert(dev->msix_enabled);
+ pba_entry = qpci_io_readl(dev, addr);
+ qpci_io_writel(dev, addr, pba_entry & ~(1 << bit_n));
+ return (pba_entry & (1 << bit_n)) != 0;
+}
+
+bool qpci_msix_masked(QPCIDevice *dev, uint16_t entry)
+{
+ uint8_t addr;
+ uint16_t val;
+ void *vector_addr = dev->msix_table + (entry * PCI_MSIX_ENTRY_SIZE);
+
+ g_assert(dev->msix_enabled);
+ addr = qpci_find_capability(dev, PCI_CAP_ID_MSIX);
+ g_assert_cmphex(addr, !=, 0);
+ val = qpci_config_readw(dev, addr + PCI_MSIX_FLAGS);
+
+ if (val & PCI_MSIX_FLAGS_MASKALL) {
+ return true;
+ } else {
+ return (qpci_io_readl(dev, vector_addr + PCI_MSIX_ENTRY_VECTOR_CTRL)
+ & PCI_MSIX_ENTRY_CTRL_MASKBIT) != 0;
+ }
+}
+
+uint16_t qpci_msix_table_size(QPCIDevice *dev)
+{
+ uint8_t addr;
+ uint16_t control;
+
+ addr = qpci_find_capability(dev, PCI_CAP_ID_MSIX);
+ g_assert_cmphex(addr, !=, 0);
+
+ control = qpci_config_readw(dev, addr + PCI_MSIX_FLAGS);
+ return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
uint8_t qpci_config_readb(QPCIDevice *dev, uint8_t offset)
{
return dev->bus->config_readb(dev->bus, dev->devfn, offset);
diff --git a/tests/libqos/pci.h b/tests/libqos/pci.h
index 9ee048b154..d51eb9e219 100644
--- a/tests/libqos/pci.h
+++ b/tests/libqos/pci.h
@@ -14,6 +14,7 @@
#define LIBQOS_PCI_H
#include <stdint.h>
+#include "libqtest.h"
#define QPCI_DEVFN(dev, fn) (((dev) << 3) | (fn))
@@ -49,6 +50,9 @@ struct QPCIDevice
{
QPCIBus *bus;
int devfn;
+ bool msix_enabled;
+ void *msix_table;
+ void *msix_pba;
};
void qpci_device_foreach(QPCIBus *bus, int vendor_id, int device_id,
@@ -57,6 +61,12 @@ void qpci_device_foreach(QPCIBus *bus, int vendor_id, int device_id,
QPCIDevice *qpci_device_find(QPCIBus *bus, int devfn);
void qpci_device_enable(QPCIDevice *dev);
+uint8_t qpci_find_capability(QPCIDevice *dev, uint8_t id);
+void qpci_msix_enable(QPCIDevice *dev);
+void qpci_msix_disable(QPCIDevice *dev);
+bool qpci_msix_pending(QPCIDevice *dev, uint16_t entry);
+bool qpci_msix_masked(QPCIDevice *dev, uint16_t entry);
+uint16_t qpci_msix_table_size(QPCIDevice *dev);
uint8_t qpci_config_readb(QPCIDevice *dev, uint8_t offset);
uint16_t qpci_config_readw(QPCIDevice *dev, uint8_t offset);
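
Together with the new QPCIDevice fields, the intended call order for the MSI-X helpers is enable, query, then disable before freeing the device. An illustrative sketch (slot 4 is an arbitrary choice, not mandated by this patch):

#include <glib.h>
#include "libqos/pci.h"
#include "libqos/pci-pc.h"

static void msix_probe(void)
{
    QPCIBus *bus = qpci_init_pc();
    QPCIDevice *dev = qpci_device_find(bus, QPCI_DEVFN(4, 0));

    g_assert(dev != NULL);
    qpci_device_enable(dev);
    qpci_msix_enable(dev);

    g_assert_cmpint(qpci_msix_table_size(dev), >=, 1);
    if (!qpci_msix_masked(dev, 0)) {
        /* Reading the PBA bit also clears it. */
        (void)qpci_msix_pending(dev, 0);
    }

    qpci_msix_disable(dev);
    g_free(dev);
}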
diff --git a/tests/libqos/virtio-pci.c b/tests/libqos/virtio-pci.c
new file mode 100644
index 0000000000..788ebaff46
--- /dev/null
+++ b/tests/libqos/virtio-pci.c
@@ -0,0 +1,343 @@
+/*
+ * libqos virtio PCI driver
+ *
+ * Copyright (c) 2014 Marc Marí
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <stdio.h>
+#include "libqtest.h"
+#include "libqos/virtio.h"
+#include "libqos/virtio-pci.h"
+#include "libqos/pci.h"
+#include "libqos/pci-pc.h"
+#include "libqos/malloc.h"
+#include "libqos/malloc-pc.h"
+
+#include "hw/pci/pci_regs.h"
+
+typedef struct QVirtioPCIForeachData {
+ void (*func)(QVirtioDevice *d, void *data);
+ uint16_t device_type;
+ void *user_data;
+} QVirtioPCIForeachData;
+
+static QVirtioPCIDevice *qpcidevice_to_qvirtiodevice(QPCIDevice *pdev)
+{
+ QVirtioPCIDevice *vpcidev;
+ vpcidev = g_malloc0(sizeof(*vpcidev));
+
+ if (pdev) {
+ vpcidev->pdev = pdev;
+ vpcidev->vdev.device_type =
+ qpci_config_readw(vpcidev->pdev, PCI_SUBSYSTEM_ID);
+ }
+
+ vpcidev->config_msix_entry = -1;
+
+ return vpcidev;
+}
+
+static void qvirtio_pci_foreach_callback(
+ QPCIDevice *dev, int devfn, void *data)
+{
+ QVirtioPCIForeachData *d = data;
+ QVirtioPCIDevice *vpcidev = qpcidevice_to_qvirtiodevice(dev);
+
+ if (vpcidev->vdev.device_type == d->device_type) {
+ d->func(&vpcidev->vdev, d->user_data);
+ } else {
+ g_free(vpcidev);
+ }
+}
+
+static void qvirtio_pci_assign_device(QVirtioDevice *d, void *data)
+{
+ QVirtioPCIDevice **vpcidev = data;
+ *vpcidev = (QVirtioPCIDevice *)d;
+}
+
+static uint8_t qvirtio_pci_config_readb(QVirtioDevice *d, void *addr)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ return qpci_io_readb(dev->pdev, addr);
+}
+
+static uint16_t qvirtio_pci_config_readw(QVirtioDevice *d, void *addr)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ return qpci_io_readw(dev->pdev, addr);
+}
+
+static uint32_t qvirtio_pci_config_readl(QVirtioDevice *d, void *addr)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ return qpci_io_readl(dev->pdev, addr);
+}
+
+static uint64_t qvirtio_pci_config_readq(QVirtioDevice *d, void *addr)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ int i;
+ uint64_t u64 = 0;
+
+ if (qtest_big_endian()) {
+ for (i = 0; i < 8; ++i) {
+ u64 |= (uint64_t)qpci_io_readb(dev->pdev, addr + i) << (7 - i) * 8;
+ }
+ } else {
+ for (i = 0; i < 8; ++i) {
+ u64 |= (uint64_t)qpci_io_readb(dev->pdev, addr + i) << i * 8;
+ }
+ }
+
+ return u64;
+}
+
+static uint32_t qvirtio_pci_get_features(QVirtioDevice *d)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ return qpci_io_readl(dev->pdev, dev->addr + QVIRTIO_DEVICE_FEATURES);
+}
+
+static void qvirtio_pci_set_features(QVirtioDevice *d, uint32_t features)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ qpci_io_writel(dev->pdev, dev->addr + QVIRTIO_GUEST_FEATURES, features);
+}
+
+static uint32_t qvirtio_pci_get_guest_features(QVirtioDevice *d)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ return qpci_io_readl(dev->pdev, dev->addr + QVIRTIO_GUEST_FEATURES);
+}
+
+static uint8_t qvirtio_pci_get_status(QVirtioDevice *d)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ return qpci_io_readb(dev->pdev, dev->addr + QVIRTIO_DEVICE_STATUS);
+}
+
+static void qvirtio_pci_set_status(QVirtioDevice *d, uint8_t status)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ qpci_io_writeb(dev->pdev, dev->addr + QVIRTIO_DEVICE_STATUS, status);
+}
+
+static bool qvirtio_pci_get_queue_isr_status(QVirtioDevice *d, QVirtQueue *vq)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ QVirtQueuePCI *vqpci = (QVirtQueuePCI *)vq;
+ uint32_t data;
+
+ if (dev->pdev->msix_enabled) {
+ g_assert_cmpint(vqpci->msix_entry, !=, -1);
+ if (qpci_msix_masked(dev->pdev, vqpci->msix_entry)) {
+ /* No ISR checking should be done if masked, but read anyway */
+ return qpci_msix_pending(dev->pdev, vqpci->msix_entry);
+ } else {
+ data = readl(vqpci->msix_addr);
+ writel(vqpci->msix_addr, 0);
+ return data == vqpci->msix_data;
+ }
+ } else {
+ return qpci_io_readb(dev->pdev, dev->addr + QVIRTIO_ISR_STATUS) & 1;
+ }
+}
+
+static bool qvirtio_pci_get_config_isr_status(QVirtioDevice *d)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ uint32_t data;
+
+ if (dev->pdev->msix_enabled) {
+ g_assert_cmpint(dev->config_msix_entry, !=, -1);
+ if (qpci_msix_masked(dev->pdev, dev->config_msix_entry)) {
+ /* No ISR checking should be done if masked, but read anyway */
+ return qpci_msix_pending(dev->pdev, dev->config_msix_entry);
+ } else {
+ data = readl(dev->config_msix_addr);
+ writel(dev->config_msix_addr, 0);
+ return data == dev->config_msix_data;
+ }
+ } else {
+ return qpci_io_readb(dev->pdev, dev->addr + QVIRTIO_ISR_STATUS) & 2;
+ }
+}
+
+static void qvirtio_pci_queue_select(QVirtioDevice *d, uint16_t index)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ qpci_io_writeb(dev->pdev, dev->addr + QVIRTIO_QUEUE_SELECT, index);
+}
+
+static uint16_t qvirtio_pci_get_queue_size(QVirtioDevice *d)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ return qpci_io_readw(dev->pdev, dev->addr + QVIRTIO_QUEUE_SIZE);
+}
+
+static void qvirtio_pci_set_queue_address(QVirtioDevice *d, uint32_t pfn)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ qpci_io_writel(dev->pdev, dev->addr + QVIRTIO_QUEUE_ADDRESS, pfn);
+}
+
+static QVirtQueue *qvirtio_pci_virtqueue_setup(QVirtioDevice *d,
+ QGuestAllocator *alloc, uint16_t index)
+{
+ uint32_t feat;
+ uint64_t addr;
+ QVirtQueuePCI *vqpci;
+
+ vqpci = g_malloc0(sizeof(*vqpci));
+ feat = qvirtio_pci_get_guest_features(d);
+
+ qvirtio_pci_queue_select(d, index);
+ vqpci->vq.index = index;
+ vqpci->vq.size = qvirtio_pci_get_queue_size(d);
+ vqpci->vq.free_head = 0;
+ vqpci->vq.num_free = vqpci->vq.size;
+ vqpci->vq.align = QVIRTIO_PCI_ALIGN;
+ vqpci->vq.indirect = (feat & QVIRTIO_F_RING_INDIRECT_DESC) != 0;
+ vqpci->vq.event = (feat & QVIRTIO_F_RING_EVENT_IDX) != 0;
+
+ vqpci->msix_entry = -1;
+ vqpci->msix_addr = 0;
+ vqpci->msix_data = 0x12345678;
+
+    /* Check the queue size is not zero */
+ g_assert_cmpint(vqpci->vq.size, !=, 0);
+
+ /* Check power of 2 */
+ g_assert_cmpint(vqpci->vq.size & (vqpci->vq.size - 1), ==, 0);
+
+ addr = guest_alloc(alloc, qvring_size(vqpci->vq.size, QVIRTIO_PCI_ALIGN));
+ qvring_init(alloc, &vqpci->vq, addr);
+ qvirtio_pci_set_queue_address(d, vqpci->vq.desc / QVIRTIO_PCI_ALIGN);
+
+ return &vqpci->vq;
+}
+
+static void qvirtio_pci_virtqueue_kick(QVirtioDevice *d, QVirtQueue *vq)
+{
+ QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d;
+ qpci_io_writew(dev->pdev, dev->addr + QVIRTIO_QUEUE_NOTIFY, vq->index);
+}
+
+const QVirtioBus qvirtio_pci = {
+ .config_readb = qvirtio_pci_config_readb,
+ .config_readw = qvirtio_pci_config_readw,
+ .config_readl = qvirtio_pci_config_readl,
+ .config_readq = qvirtio_pci_config_readq,
+ .get_features = qvirtio_pci_get_features,
+ .set_features = qvirtio_pci_set_features,
+ .get_guest_features = qvirtio_pci_get_guest_features,
+ .get_status = qvirtio_pci_get_status,
+ .set_status = qvirtio_pci_set_status,
+ .get_queue_isr_status = qvirtio_pci_get_queue_isr_status,
+ .get_config_isr_status = qvirtio_pci_get_config_isr_status,
+ .queue_select = qvirtio_pci_queue_select,
+ .get_queue_size = qvirtio_pci_get_queue_size,
+ .set_queue_address = qvirtio_pci_set_queue_address,
+ .virtqueue_setup = qvirtio_pci_virtqueue_setup,
+ .virtqueue_kick = qvirtio_pci_virtqueue_kick,
+};
+
+void qvirtio_pci_foreach(QPCIBus *bus, uint16_t device_type,
+ void (*func)(QVirtioDevice *d, void *data), void *data)
+{
+ QVirtioPCIForeachData d = { .func = func,
+ .device_type = device_type,
+ .user_data = data };
+
+ qpci_device_foreach(bus, QVIRTIO_VENDOR_ID, -1,
+ qvirtio_pci_foreach_callback, &d);
+}
+
+QVirtioPCIDevice *qvirtio_pci_device_find(QPCIBus *bus, uint16_t device_type)
+{
+ QVirtioPCIDevice *dev = NULL;
+ qvirtio_pci_foreach(bus, device_type, qvirtio_pci_assign_device, &dev);
+
+ return dev;
+}
+
+void qvirtio_pci_device_enable(QVirtioPCIDevice *d)
+{
+ qpci_device_enable(d->pdev);
+ d->addr = qpci_iomap(d->pdev, 0, NULL);
+ g_assert(d->addr != NULL);
+}
+
+void qvirtio_pci_device_disable(QVirtioPCIDevice *d)
+{
+ qpci_iounmap(d->pdev, d->addr);
+ d->addr = NULL;
+}
+
+void qvirtqueue_pci_msix_setup(QVirtioPCIDevice *d, QVirtQueuePCI *vqpci,
+ QGuestAllocator *alloc, uint16_t entry)
+{
+ uint16_t vector;
+ uint32_t control;
+ void *addr;
+
+ g_assert(d->pdev->msix_enabled);
+ addr = d->pdev->msix_table + (entry * 16);
+
+ g_assert_cmpint(entry, >=, 0);
+ g_assert_cmpint(entry, <, qpci_msix_table_size(d->pdev));
+ vqpci->msix_entry = entry;
+
+ vqpci->msix_addr = guest_alloc(alloc, 4);
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_LOWER_ADDR,
+ vqpci->msix_addr & ~0UL);
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_UPPER_ADDR,
+ (vqpci->msix_addr >> 32) & ~0UL);
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_DATA, vqpci->msix_data);
+
+ control = qpci_io_readl(d->pdev, addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_VECTOR_CTRL,
+ control & ~PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
+ qvirtio_pci_queue_select(&d->vdev, vqpci->vq.index);
+ qpci_io_writew(d->pdev, d->addr + QVIRTIO_MSIX_QUEUE_VECTOR, entry);
+ vector = qpci_io_readw(d->pdev, d->addr + QVIRTIO_MSIX_QUEUE_VECTOR);
+ g_assert_cmphex(vector, !=, QVIRTIO_MSI_NO_VECTOR);
+}
+
+void qvirtio_pci_set_msix_configuration_vector(QVirtioPCIDevice *d,
+ QGuestAllocator *alloc, uint16_t entry)
+{
+ uint16_t vector;
+ uint32_t control;
+ void *addr;
+
+ g_assert(d->pdev->msix_enabled);
+ addr = d->pdev->msix_table + (entry * 16);
+
+ g_assert_cmpint(entry, >=, 0);
+ g_assert_cmpint(entry, <, qpci_msix_table_size(d->pdev));
+ d->config_msix_entry = entry;
+
+ d->config_msix_data = 0x12345678;
+ d->config_msix_addr = guest_alloc(alloc, 4);
+
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_LOWER_ADDR,
+ d->config_msix_addr & ~0UL);
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_UPPER_ADDR,
+ (d->config_msix_addr >> 32) & ~0UL);
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_DATA, d->config_msix_data);
+
+ control = qpci_io_readl(d->pdev, addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_VECTOR_CTRL,
+ control & ~PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
+ qpci_io_writew(d->pdev, d->addr + QVIRTIO_MSIX_CONF_VECTOR, entry);
+ vector = qpci_io_readw(d->pdev, d->addr + QVIRTIO_MSIX_CONF_VECTOR);
+ g_assert_cmphex(vector, !=, QVIRTIO_MSI_NO_VECTOR);
+}
diff --git a/tests/libqos/virtio-pci.h b/tests/libqos/virtio-pci.h
new file mode 100644
index 0000000000..883f7ff267
--- /dev/null
+++ b/tests/libqos/virtio-pci.h
@@ -0,0 +1,61 @@
+/*
+ * libqos virtio PCI definitions
+ *
+ * Copyright (c) 2014 Marc Marí
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef LIBQOS_VIRTIO_PCI_H
+#define LIBQOS_VIRTIO_PCI_H
+
+#include "libqos/virtio.h"
+#include "libqos/pci.h"
+
+#define QVIRTIO_DEVICE_FEATURES 0x00
+#define QVIRTIO_GUEST_FEATURES 0x04
+#define QVIRTIO_QUEUE_ADDRESS 0x08
+#define QVIRTIO_QUEUE_SIZE 0x0C
+#define QVIRTIO_QUEUE_SELECT 0x0E
+#define QVIRTIO_QUEUE_NOTIFY 0x10
+#define QVIRTIO_DEVICE_STATUS 0x12
+#define QVIRTIO_ISR_STATUS 0x13
+#define QVIRTIO_MSIX_CONF_VECTOR 0x14
+#define QVIRTIO_MSIX_QUEUE_VECTOR 0x16
+#define QVIRTIO_DEVICE_SPECIFIC_MSIX 0x18
+#define QVIRTIO_DEVICE_SPECIFIC_NO_MSIX 0x14
+
+#define QVIRTIO_PCI_ALIGN 4096
+
+#define QVIRTIO_MSI_NO_VECTOR 0xFFFF
+
+typedef struct QVirtioPCIDevice {
+ QVirtioDevice vdev;
+ QPCIDevice *pdev;
+ void *addr;
+ uint16_t config_msix_entry;
+ uint64_t config_msix_addr;
+ uint32_t config_msix_data;
+} QVirtioPCIDevice;
+
+typedef struct QVirtQueuePCI {
+ QVirtQueue vq;
+ uint16_t msix_entry;
+ uint64_t msix_addr;
+ uint32_t msix_data;
+} QVirtQueuePCI;
+
+extern const QVirtioBus qvirtio_pci;
+
+void qvirtio_pci_foreach(QPCIBus *bus, uint16_t device_type,
+ void (*func)(QVirtioDevice *d, void *data), void *data);
+QVirtioPCIDevice *qvirtio_pci_device_find(QPCIBus *bus, uint16_t device_type);
+void qvirtio_pci_device_enable(QVirtioPCIDevice *d);
+void qvirtio_pci_device_disable(QVirtioPCIDevice *d);
+
+void qvirtio_pci_set_msix_configuration_vector(QVirtioPCIDevice *d,
+ QGuestAllocator *alloc, uint16_t entry);
+void qvirtqueue_pci_msix_setup(QVirtioPCIDevice *d, QVirtQueuePCI *vqpci,
+ QGuestAllocator *alloc, uint16_t entry);
+#endif
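
Through the qvirtio_pci ops table, the PCI backend plugs into the generic qvirtio_* helpers defined in libqos/virtio.c below. A bring-up sketch, mirroring what the virtio-blk test later in this diff does (the net device id is only an example):

#include <glib.h>
#include "libqos/virtio.h"
#include "libqos/virtio-pci.h"
#include "libqos/pci.h"

static void virtio_pci_bringup(QPCIBus *bus)
{
    QVirtioPCIDevice *dev = qvirtio_pci_device_find(bus, QVIRTIO_NET_DEVICE_ID);

    g_assert(dev != NULL);
    qvirtio_pci_device_enable(dev);

    /* Status negotiation goes through the generic bus ops. */
    qvirtio_reset(&qvirtio_pci, &dev->vdev);
    qvirtio_set_acknowledge(&qvirtio_pci, &dev->vdev);
    qvirtio_set_driver(&qvirtio_pci, &dev->vdev);
    /* ... feature negotiation and virtqueue setup would go here ... */
    qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);

    qvirtio_pci_device_disable(dev);
    g_free(dev);
}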
diff --git a/tests/libqos/virtio.c b/tests/libqos/virtio.c
new file mode 100644
index 0000000000..9b6de2c0a7
--- /dev/null
+++ b/tests/libqos/virtio.c
@@ -0,0 +1,257 @@
+/*
+ * libqos virtio driver
+ *
+ * Copyright (c) 2014 Marc Marí
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include "libqtest.h"
+#include "libqos/virtio.h"
+
+uint8_t qvirtio_config_readb(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr)
+{
+ return bus->config_readb(d, addr);
+}
+
+uint16_t qvirtio_config_readw(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr)
+{
+ return bus->config_readw(d, addr);
+}
+
+uint32_t qvirtio_config_readl(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr)
+{
+ return bus->config_readl(d, addr);
+}
+
+uint64_t qvirtio_config_readq(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr)
+{
+ return bus->config_readq(d, addr);
+}
+
+uint32_t qvirtio_get_features(const QVirtioBus *bus, QVirtioDevice *d)
+{
+ return bus->get_features(d);
+}
+
+void qvirtio_set_features(const QVirtioBus *bus, QVirtioDevice *d,
+ uint32_t features)
+{
+ bus->set_features(d, features);
+}
+
+QVirtQueue *qvirtqueue_setup(const QVirtioBus *bus, QVirtioDevice *d,
+ QGuestAllocator *alloc, uint16_t index)
+{
+ return bus->virtqueue_setup(d, alloc, index);
+}
+
+void qvirtio_reset(const QVirtioBus *bus, QVirtioDevice *d)
+{
+ bus->set_status(d, QVIRTIO_RESET);
+ g_assert_cmphex(bus->get_status(d), ==, QVIRTIO_RESET);
+}
+
+void qvirtio_set_acknowledge(const QVirtioBus *bus, QVirtioDevice *d)
+{
+ bus->set_status(d, bus->get_status(d) | QVIRTIO_ACKNOWLEDGE);
+ g_assert_cmphex(bus->get_status(d), ==, QVIRTIO_ACKNOWLEDGE);
+}
+
+void qvirtio_set_driver(const QVirtioBus *bus, QVirtioDevice *d)
+{
+ bus->set_status(d, bus->get_status(d) | QVIRTIO_DRIVER);
+ g_assert_cmphex(bus->get_status(d), ==,
+ QVIRTIO_DRIVER | QVIRTIO_ACKNOWLEDGE);
+}
+
+void qvirtio_set_driver_ok(const QVirtioBus *bus, QVirtioDevice *d)
+{
+ bus->set_status(d, bus->get_status(d) | QVIRTIO_DRIVER_OK);
+ g_assert_cmphex(bus->get_status(d), ==,
+ QVIRTIO_DRIVER_OK | QVIRTIO_DRIVER | QVIRTIO_ACKNOWLEDGE);
+}
+
+bool qvirtio_wait_queue_isr(const QVirtioBus *bus, QVirtioDevice *d,
+ QVirtQueue *vq, uint64_t timeout)
+{
+ do {
+ clock_step(100);
+ if (bus->get_queue_isr_status(d, vq)) {
+ break; /* It has ended */
+ }
+ } while (--timeout);
+
+ return timeout != 0;
+}
+
+bool qvirtio_wait_config_isr(const QVirtioBus *bus, QVirtioDevice *d,
+ uint64_t timeout)
+{
+ do {
+ clock_step(100);
+ if (bus->get_config_isr_status(d)) {
+ break; /* It has ended */
+ }
+ } while (--timeout);
+
+ return timeout != 0;
+}
+
+void qvring_init(const QGuestAllocator *alloc, QVirtQueue *vq, uint64_t addr)
+{
+ int i;
+
+ vq->desc = addr;
+ vq->avail = vq->desc + vq->size*sizeof(QVRingDesc);
+ vq->used = (uint64_t)((vq->avail + sizeof(uint16_t) * (3 + vq->size)
+ + vq->align - 1) & ~(vq->align - 1));
+
+ for (i = 0; i < vq->size - 1; i++) {
+ /* vq->desc[i].addr */
+ writew(vq->desc + (16 * i), 0);
+ /* vq->desc[i].next */
+ writew(vq->desc + (16 * i) + 14, i + 1);
+ }
+
+ /* vq->avail->flags */
+ writew(vq->avail, 0);
+ /* vq->avail->idx */
+ writew(vq->avail + 2, 0);
+ /* vq->avail->used_event */
+ writew(vq->avail + 4 + (2 * vq->size), 0);
+
+ /* vq->used->flags */
+ writew(vq->used, 0);
+ /* vq->used->avail_event */
+ writew(vq->used+2+(sizeof(struct QVRingUsedElem)*vq->size), 0);
+}
+
+QVRingIndirectDesc *qvring_indirect_desc_setup(QVirtioDevice *d,
+ QGuestAllocator *alloc, uint16_t elem)
+{
+ int i;
+ QVRingIndirectDesc *indirect = g_malloc(sizeof(*indirect));
+
+ indirect->index = 0;
+ indirect->elem = elem;
+ indirect->desc = guest_alloc(alloc, sizeof(QVRingDesc)*elem);
+
+ for (i = 0; i < elem - 1; ++i) {
+ /* indirect->desc[i].addr */
+ writeq(indirect->desc + (16 * i), 0);
+ /* indirect->desc[i].flags */
+ writew(indirect->desc + (16 * i) + 12, QVRING_DESC_F_NEXT);
+ /* indirect->desc[i].next */
+ writew(indirect->desc + (16 * i) + 14, i + 1);
+ }
+
+ return indirect;
+}
+
+void qvring_indirect_desc_add(QVRingIndirectDesc *indirect, uint64_t data,
+ uint32_t len, bool write)
+{
+ uint16_t flags;
+
+ g_assert_cmpint(indirect->index, <, indirect->elem);
+
+ flags = readw(indirect->desc + (16 * indirect->index) + 12);
+
+ if (write) {
+ flags |= QVRING_DESC_F_WRITE;
+ }
+
+ /* indirect->desc[indirect->index].addr */
+ writeq(indirect->desc + (16 * indirect->index), data);
+ /* indirect->desc[indirect->index].len */
+ writel(indirect->desc + (16 * indirect->index) + 8, len);
+ /* indirect->desc[indirect->index].flags */
+ writew(indirect->desc + (16 * indirect->index) + 12, flags);
+
+ indirect->index++;
+}
+
+uint32_t qvirtqueue_add(QVirtQueue *vq, uint64_t data, uint32_t len, bool write,
+ bool next)
+{
+ uint16_t flags = 0;
+ vq->num_free--;
+
+ if (write) {
+ flags |= QVRING_DESC_F_WRITE;
+ }
+
+ if (next) {
+ flags |= QVRING_DESC_F_NEXT;
+ }
+
+ /* vq->desc[vq->free_head].addr */
+ writeq(vq->desc + (16 * vq->free_head), data);
+ /* vq->desc[vq->free_head].len */
+ writel(vq->desc + (16 * vq->free_head) + 8, len);
+ /* vq->desc[vq->free_head].flags */
+ writew(vq->desc + (16 * vq->free_head) + 12, flags);
+
+ return vq->free_head++; /* Return and increase, in this order */
+}
+
+uint32_t qvirtqueue_add_indirect(QVirtQueue *vq, QVRingIndirectDesc *indirect)
+{
+ g_assert(vq->indirect);
+ g_assert_cmpint(vq->size, >=, indirect->elem);
+ g_assert_cmpint(indirect->index, ==, indirect->elem);
+
+ vq->num_free--;
+
+ /* vq->desc[vq->free_head].addr */
+ writeq(vq->desc + (16 * vq->free_head), indirect->desc);
+ /* vq->desc[vq->free_head].len */
+ writel(vq->desc + (16 * vq->free_head) + 8,
+ sizeof(QVRingDesc) * indirect->elem);
+ /* vq->desc[vq->free_head].flags */
+ writew(vq->desc + (16 * vq->free_head) + 12, QVRING_DESC_F_INDIRECT);
+
+ return vq->free_head++; /* Return and increase, in this order */
+}
+
+void qvirtqueue_kick(const QVirtioBus *bus, QVirtioDevice *d, QVirtQueue *vq,
+ uint32_t free_head)
+{
+ /* vq->avail->idx */
+ uint16_t idx = readl(vq->avail + 2);
+ /* vq->used->flags */
+ uint16_t flags;
+ /* vq->used->avail_event */
+ uint16_t avail_event;
+
+ /* vq->avail->ring[idx % vq->size] */
+ writel(vq->avail + 4 + (2 * (idx % vq->size)), free_head);
+ /* vq->avail->idx */
+ writel(vq->avail + 2, idx + 1);
+
+ /* Must read after idx is updated */
+ flags = readw(vq->avail);
+ avail_event = readw(vq->used + 4 +
+ (sizeof(struct QVRingUsedElem) * vq->size));
+
+ /* < 1 because we add elements to avail queue one by one */
+ if ((flags & QVRING_USED_F_NO_NOTIFY) == 0 &&
+ (!vq->event || (uint16_t)(idx-avail_event) < 1)) {
+ bus->virtqueue_kick(d, vq);
+ }
+}
+
+void qvirtqueue_set_used_event(QVirtQueue *vq, uint16_t idx)
+{
+ g_assert(vq->event);
+
+ /* vq->avail->used_event */
+ writew(vq->avail + 4 + (2 * vq->size), idx);
+}
diff --git a/tests/libqos/virtio.h b/tests/libqos/virtio.h
new file mode 100644
index 0000000000..70b3376360
--- /dev/null
+++ b/tests/libqos/virtio.h
@@ -0,0 +1,182 @@
+/*
+ * libqos virtio definitions
+ *
+ * Copyright (c) 2014 Marc Marí
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef LIBQOS_VIRTIO_H
+#define LIBQOS_VIRTIO_H
+
+#include "libqos/malloc.h"
+
+#define QVIRTIO_VENDOR_ID 0x1AF4
+
+#define QVIRTIO_RESET 0x0
+#define QVIRTIO_ACKNOWLEDGE 0x1
+#define QVIRTIO_DRIVER 0x2
+#define QVIRTIO_DRIVER_OK 0x4
+
+#define QVIRTIO_NET_DEVICE_ID 0x1
+#define QVIRTIO_BLK_DEVICE_ID 0x2
+
+#define QVIRTIO_F_NOTIFY_ON_EMPTY 0x01000000
+#define QVIRTIO_F_ANY_LAYOUT 0x08000000
+#define QVIRTIO_F_RING_INDIRECT_DESC 0x10000000
+#define QVIRTIO_F_RING_EVENT_IDX 0x20000000
+#define QVIRTIO_F_BAD_FEATURE 0x40000000
+
+#define QVRING_DESC_F_NEXT 0x1
+#define QVRING_DESC_F_WRITE 0x2
+#define QVRING_DESC_F_INDIRECT 0x4
+
+#define QVRING_AVAIL_F_NO_INTERRUPT 1
+
+#define QVRING_USED_F_NO_NOTIFY 1
+
+typedef struct QVirtioDevice {
+ /* Device type */
+ uint16_t device_type;
+} QVirtioDevice;
+
+typedef struct QVRingDesc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t flags;
+ uint16_t next;
+} QVRingDesc;
+
+typedef struct QVRingAvail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0]; /* This is an array of uint16_t */
+ uint16_t used_event;
+} QVRingAvail;
+
+typedef struct QVRingUsedElem {
+ uint32_t id;
+ uint32_t len;
+} QVRingUsedElem;
+
+typedef struct QVRingUsed {
+ uint16_t flags;
+ uint16_t idx;
+ QVRingUsedElem ring[0]; /* This is an array of QVRingUsedElem structs */
+ uint16_t avail_event;
+} QVRingUsed;
+
+typedef struct QVirtQueue {
+ uint64_t desc; /* This points to an array of QVRingDesc */
+ uint64_t avail; /* This points to a QVRingAvail */
+    uint64_t used; /* This points to a QVRingUsed */
+ uint16_t index;
+ uint32_t size;
+ uint32_t free_head;
+ uint32_t num_free;
+ uint32_t align;
+ bool indirect;
+ bool event;
+} QVirtQueue;
+
+typedef struct QVRingIndirectDesc {
+    uint64_t desc; /* This points to an array of QVRingDesc */
+ uint16_t index;
+ uint16_t elem;
+} QVRingIndirectDesc;
+
+typedef struct QVirtioBus {
+ uint8_t (*config_readb)(QVirtioDevice *d, void *addr);
+ uint16_t (*config_readw)(QVirtioDevice *d, void *addr);
+ uint32_t (*config_readl)(QVirtioDevice *d, void *addr);
+ uint64_t (*config_readq)(QVirtioDevice *d, void *addr);
+
+ /* Get features of the device */
+ uint32_t (*get_features)(QVirtioDevice *d);
+
+ /* Set features of the device */
+ void (*set_features)(QVirtioDevice *d, uint32_t features);
+
+ /* Get features of the guest */
+ uint32_t (*get_guest_features)(QVirtioDevice *d);
+
+ /* Get status of the device */
+ uint8_t (*get_status)(QVirtioDevice *d);
+
+ /* Set status of the device */
+ void (*set_status)(QVirtioDevice *d, uint8_t status);
+
+ /* Get the queue ISR status of the device */
+ bool (*get_queue_isr_status)(QVirtioDevice *d, QVirtQueue *vq);
+
+ /* Get the configuration ISR status of the device */
+ bool (*get_config_isr_status)(QVirtioDevice *d);
+
+ /* Select a queue to work on */
+ void (*queue_select)(QVirtioDevice *d, uint16_t index);
+
+ /* Get the size of the selected queue */
+ uint16_t (*get_queue_size)(QVirtioDevice *d);
+
+ /* Set the address of the selected queue */
+ void (*set_queue_address)(QVirtioDevice *d, uint32_t pfn);
+
+ /* Setup the virtqueue specified by index */
+ QVirtQueue *(*virtqueue_setup)(QVirtioDevice *d, QGuestAllocator *alloc,
+ uint16_t index);
+
+ /* Notify changes in virtqueue */
+ void (*virtqueue_kick)(QVirtioDevice *d, QVirtQueue *vq);
+} QVirtioBus;
+
+static inline uint32_t qvring_size(uint32_t num, uint32_t align)
+{
+ return ((sizeof(struct QVRingDesc) * num + sizeof(uint16_t) * (3 + num)
+ + align - 1) & ~(align - 1))
+ + sizeof(uint16_t) * 3 + sizeof(struct QVRingUsedElem) * num;
+}
+
+uint8_t qvirtio_config_readb(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr);
+uint16_t qvirtio_config_readw(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr);
+uint32_t qvirtio_config_readl(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr);
+uint64_t qvirtio_config_readq(const QVirtioBus *bus, QVirtioDevice *d,
+ void *addr);
+uint32_t qvirtio_get_features(const QVirtioBus *bus, QVirtioDevice *d);
+void qvirtio_set_features(const QVirtioBus *bus, QVirtioDevice *d,
+ uint32_t features);
+
+void qvirtio_reset(const QVirtioBus *bus, QVirtioDevice *d);
+void qvirtio_set_acknowledge(const QVirtioBus *bus, QVirtioDevice *d);
+void qvirtio_set_driver(const QVirtioBus *bus, QVirtioDevice *d);
+void qvirtio_set_driver_ok(const QVirtioBus *bus, QVirtioDevice *d);
+
+bool qvirtio_wait_queue_isr(const QVirtioBus *bus, QVirtioDevice *d,
+ QVirtQueue *vq, uint64_t timeout);
+bool qvirtio_wait_config_isr(const QVirtioBus *bus, QVirtioDevice *d,
+ uint64_t timeout);
+QVirtQueue *qvirtqueue_setup(const QVirtioBus *bus, QVirtioDevice *d,
+ QGuestAllocator *alloc, uint16_t index);
+
+void qvring_init(const QGuestAllocator *alloc, QVirtQueue *vq, uint64_t addr);
+QVRingIndirectDesc *qvring_indirect_desc_setup(QVirtioDevice *d,
+ QGuestAllocator *alloc, uint16_t elem);
+void qvring_indirect_desc_add(QVRingIndirectDesc *indirect, uint64_t data,
+ uint32_t len, bool write);
+uint32_t qvirtqueue_add(QVirtQueue *vq, uint64_t data, uint32_t len, bool write,
+ bool next);
+uint32_t qvirtqueue_add_indirect(QVirtQueue *vq, QVRingIndirectDesc *indirect);
+void qvirtqueue_kick(const QVirtioBus *bus, QVirtioDevice *d, QVirtQueue *vq,
+ uint32_t free_head);
+
+void qvirtqueue_set_used_event(QVirtQueue *vq, uint16_t idx);
+#endif
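
For reference, the layout that qvring_size() and qvring_init() assume works out as follows for a 128-entry queue aligned to QVIRTIO_PCI_ALIGN (4096); this is a worked example of the arithmetic above, not additional code:

/*
 * desc  : 128 * sizeof(QVRingDesc) = 128 * 16            = 2048 bytes, offset 0
 * avail : flags + idx + ring[128] + used_event
 *         = 2 + 2 + 256 + 2                              = 262 bytes, offset 2048
 * used  : starts at the next 4096-byte boundary          = offset 4096
 *         flags + idx + ring[128] * 8 + avail_event
 *         = 2 + 2 + 1024 + 2                             = 1030 bytes
 *
 * qvring_size(128, 4096) = 4096 + 1030 = 5126 bytes of guest memory.
 */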
diff --git a/tests/libqtest.c b/tests/libqtest.c
index 5e458e884e..9a92aa70e4 100644
--- a/tests/libqtest.c
+++ b/tests/libqtest.c
@@ -696,3 +696,51 @@ void qmp_discard_response(const char *fmt, ...)
qtest_qmpv_discard_response(global_qtest, fmt, ap);
va_end(ap);
}
+
+bool qtest_big_endian(void)
+{
+ const char *arch = qtest_get_arch();
+ int i;
+
+ static const struct {
+ const char *arch;
+ bool big_endian;
+ } endianness[] = {
+ { "aarch64", false },
+ { "alpha", false },
+ { "arm", false },
+ { "cris", false },
+ { "i386", false },
+ { "lm32", true },
+ { "m68k", true },
+ { "microblaze", true },
+ { "microblazeel", false },
+ { "mips", true },
+ { "mips64", true },
+ { "mips64el", false },
+ { "mipsel", false },
+ { "moxie", true },
+ { "or32", true },
+ { "ppc", true },
+ { "ppc64", true },
+ { "ppcemb", true },
+ { "s390x", true },
+ { "sh4", false },
+ { "sh4eb", true },
+ { "sparc", true },
+ { "sparc64", true },
+ { "unicore32", false },
+ { "x86_64", false },
+ { "xtensa", false },
+ { "xtensaeb", true },
+ {},
+ };
+
+ for (i = 0; endianness[i].arch; i++) {
+ if (strcmp(endianness[i].arch, arch) == 0) {
+ return endianness[i].big_endian;
+ }
+ }
+
+ return false;
+}
diff --git a/tests/libqtest.h b/tests/libqtest.h
index 1be0934f07..3e12cab2f2 100644
--- a/tests/libqtest.h
+++ b/tests/libqtest.h
@@ -682,4 +682,11 @@ static inline int64_t clock_set(int64_t val)
return qtest_clock_set(global_qtest, val);
}
+/**
+ * qtest_big_endian:
+ *
+ * Returns: True if the architecture under test has a big endian configuration.
+ */
+bool qtest_big_endian(void);
+
#endif
diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
index d53f875b89..588666cff1 100644
--- a/tests/virtio-blk-test.c
+++ b/tests/virtio-blk-test.c
@@ -2,6 +2,7 @@
* QTest testcase for VirtIO Block Device
*
* Copyright (c) 2014 SUSE LINUX Products GmbH
+ * Copyright (c) 2014 Marc Marí
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
@@ -9,12 +10,634 @@
#include <glib.h>
#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
#include "libqtest.h"
-#include "qemu/osdep.h"
+#include "libqos/virtio.h"
+#include "libqos/virtio-pci.h"
+#include "libqos/pci-pc.h"
+#include "libqos/malloc.h"
+#include "libqos/malloc-pc.h"
+#include "qemu/bswap.h"
-/* Tests only initialization so far. TODO: Replace with functional tests */
-static void pci_nop(void)
+#define QVIRTIO_BLK_F_BARRIER 0x00000001
+#define QVIRTIO_BLK_F_SIZE_MAX 0x00000002
+#define QVIRTIO_BLK_F_SEG_MAX 0x00000004
+#define QVIRTIO_BLK_F_GEOMETRY 0x00000010
+#define QVIRTIO_BLK_F_RO 0x00000020
+#define QVIRTIO_BLK_F_BLK_SIZE 0x00000040
+#define QVIRTIO_BLK_F_SCSI 0x00000080
+#define QVIRTIO_BLK_F_WCE 0x00000200
+#define QVIRTIO_BLK_F_TOPOLOGY 0x00000400
+#define QVIRTIO_BLK_F_CONFIG_WCE 0x00000800
+
+#define QVIRTIO_BLK_T_IN 0
+#define QVIRTIO_BLK_T_OUT 1
+#define QVIRTIO_BLK_T_SCSI_CMD 2
+#define QVIRTIO_BLK_T_SCSI_CMD_OUT 3
+#define QVIRTIO_BLK_T_FLUSH 4
+#define QVIRTIO_BLK_T_FLUSH_OUT 5
+#define QVIRTIO_BLK_T_GET_ID 8
+
+#define TEST_IMAGE_SIZE (64 * 1024 * 1024)
+#define QVIRTIO_BLK_TIMEOUT 100
+#define PCI_SLOT 0x04
+#define PCI_FN 0x00
+
+typedef struct QVirtioBlkReq {
+ uint32_t type;
+ uint32_t ioprio;
+ uint64_t sector;
+ char *data;
+ uint8_t status;
+} QVirtioBlkReq;
+
+static QPCIBus *test_start(void)
+{
+ char cmdline[100];
+ char tmp_path[] = "/tmp/qtest.XXXXXX";
+ int fd, ret;
+
+ /* Create a temporary raw image */
+ fd = mkstemp(tmp_path);
+ g_assert_cmpint(fd, >=, 0);
+ ret = ftruncate(fd, TEST_IMAGE_SIZE);
+ g_assert_cmpint(ret, ==, 0);
+ close(fd);
+
+ snprintf(cmdline, 100, "-drive if=none,id=drive0,file=%s "
+ "-device virtio-blk-pci,drive=drive0,addr=%x.%x",
+ tmp_path, PCI_SLOT, PCI_FN);
+ qtest_start(cmdline);
+ unlink(tmp_path);
+
+ return qpci_init_pc();
+}
+
+static void test_end(void)
+{
+ qtest_end();
+}
+
+static QVirtioPCIDevice *virtio_blk_init(QPCIBus *bus)
+{
+ QVirtioPCIDevice *dev;
+
+ dev = qvirtio_pci_device_find(bus, QVIRTIO_BLK_DEVICE_ID);
+ g_assert(dev != NULL);
+ g_assert_cmphex(dev->vdev.device_type, ==, QVIRTIO_BLK_DEVICE_ID);
+ g_assert_cmphex(dev->pdev->devfn, ==, ((PCI_SLOT << 3) | PCI_FN));
+
+ qvirtio_pci_device_enable(dev);
+ qvirtio_reset(&qvirtio_pci, &dev->vdev);
+ qvirtio_set_acknowledge(&qvirtio_pci, &dev->vdev);
+ qvirtio_set_driver(&qvirtio_pci, &dev->vdev);
+
+ return dev;
+}
+
+static inline void virtio_blk_fix_request(QVirtioBlkReq *req)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ bool host_endian = true;
+#else
+ bool host_endian = false;
+#endif
+
+ if (qtest_big_endian() != host_endian) {
+ req->type = bswap32(req->type);
+ req->ioprio = bswap32(req->ioprio);
+ req->sector = bswap64(req->sector);
+ }
+}
+
+static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioBlkReq *req,
+ uint64_t data_size)
+{
+ uint64_t addr;
+ uint8_t status = 0xFF;
+
+ g_assert_cmpuint(data_size % 512, ==, 0);
+ addr = guest_alloc(alloc, sizeof(*req) + data_size);
+
+ virtio_blk_fix_request(req);
+
+ memwrite(addr, req, 16);
+ memwrite(addr + 16, req->data, data_size);
+ memwrite(addr + 16 + data_size, &status, sizeof(status));
+
+ return addr;
+}
+
+static void pci_basic(void)
+{
+ QVirtioPCIDevice *dev;
+ QPCIBus *bus;
+ QVirtQueuePCI *vqpci;
+ QGuestAllocator *alloc;
+ QVirtioBlkReq req;
+ void *addr;
+ uint64_t req_addr;
+ uint64_t capacity;
+ uint32_t features;
+ uint32_t free_head;
+ uint8_t status;
+ char *data;
+
+ bus = test_start();
+
+ dev = virtio_blk_init(bus);
+
+ /* MSI-X is not enabled */
+ addr = dev->addr + QVIRTIO_DEVICE_SPECIFIC_NO_MSIX;
+
+ capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
+ g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
+
+ features = qvirtio_get_features(&qvirtio_pci, &dev->vdev);
+ features = features & ~(QVIRTIO_F_BAD_FEATURE |
+ QVIRTIO_F_RING_INDIRECT_DESC | QVIRTIO_F_RING_EVENT_IDX |
+ QVIRTIO_BLK_F_SCSI);
+ qvirtio_set_features(&qvirtio_pci, &dev->vdev, features);
+
+ alloc = pc_alloc_init();
+ vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev,
+ alloc, 0);
+
+ qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);
+
+ /* Write and read with 2 descriptor layout */
+ /* Write request */
+ req.type = QVIRTIO_BLK_T_OUT;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+ strcpy(req.data, "TEST");
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 528, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ guest_free(alloc, req_addr);
+
+ /* Read request */
+ req.type = QVIRTIO_BLK_T_IN;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 16, 513, true, false);
+
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ data = g_malloc0(512);
+ memread(req_addr + 16, data, 512);
+ g_assert_cmpstr(data, ==, "TEST");
+ g_free(data);
+
+ guest_free(alloc, req_addr);
+
+ /* Write and read with 3 descriptor layout */
+ /* Write request */
+ req.type = QVIRTIO_BLK_T_OUT;
+ req.ioprio = 1;
+ req.sector = 1;
+ req.data = g_malloc0(512);
+ strcpy(req.data, "TEST");
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
+
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ guest_free(alloc, req_addr);
+
+ /* Read request */
+ req.type = QVIRTIO_BLK_T_IN;
+ req.ioprio = 1;
+ req.sector = 1;
+ req.data = g_malloc0(512);
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, true, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
+
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ data = g_malloc0(512);
+ memread(req_addr + 16, data, 512);
+ g_assert_cmpstr(data, ==, "TEST");
+ g_free(data);
+
+ guest_free(alloc, req_addr);
+
+ /* End test */
+ guest_free(alloc, vqpci->vq.desc);
+ qvirtio_pci_device_disable(dev);
+ g_free(dev);
+ test_end();
+}
+
+static void pci_indirect(void)
+{
+ QVirtioPCIDevice *dev;
+ QPCIBus *bus;
+ QVirtQueuePCI *vqpci;
+ QGuestAllocator *alloc;
+ QVirtioBlkReq req;
+ QVRingIndirectDesc *indirect;
+ void *addr;
+ uint64_t req_addr;
+ uint64_t capacity;
+ uint32_t features;
+ uint32_t free_head;
+ uint8_t status;
+ char *data;
+
+ bus = test_start();
+
+ dev = virtio_blk_init(bus);
+
+ /* MSI-X is not enabled */
+ addr = dev->addr + QVIRTIO_DEVICE_SPECIFIC_NO_MSIX;
+
+ capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
+ g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
+
+ features = qvirtio_get_features(&qvirtio_pci, &dev->vdev);
+ g_assert_cmphex(features & QVIRTIO_F_RING_INDIRECT_DESC, !=, 0);
+ features = features & ~(QVIRTIO_F_BAD_FEATURE | QVIRTIO_F_RING_EVENT_IDX |
+ QVIRTIO_BLK_F_SCSI);
+ qvirtio_set_features(&qvirtio_pci, &dev->vdev, features);
+
+ alloc = pc_alloc_init();
+ vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev,
+ alloc, 0);
+ qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);
+
+ /* Write request */
+ req.type = QVIRTIO_BLK_T_OUT;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+ strcpy(req.data, "TEST");
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ indirect = qvring_indirect_desc_setup(&dev->vdev, alloc, 2);
+ qvring_indirect_desc_add(indirect, req_addr, 528, false);
+ qvring_indirect_desc_add(indirect, req_addr + 528, 1, true);
+ free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect);
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ g_free(indirect);
+ guest_free(alloc, req_addr);
+
+ /* Read request */
+ req.type = QVIRTIO_BLK_T_IN;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+ strcpy(req.data, "TEST");
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ indirect = qvring_indirect_desc_setup(&dev->vdev, alloc, 2);
+ qvring_indirect_desc_add(indirect, req_addr, 16, false);
+ qvring_indirect_desc_add(indirect, req_addr + 16, 513, true);
+ free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect);
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ data = g_malloc0(512);
+ memread(req_addr + 16, data, 512);
+ g_assert_cmpstr(data, ==, "TEST");
+ g_free(data);
+
+ g_free(indirect);
+ guest_free(alloc, req_addr);
+
+ /* End test */
+ guest_free(alloc, vqpci->vq.desc);
+ qvirtio_pci_device_disable(dev);
+ g_free(dev);
+ test_end();
+}
+
+static void pci_config(void)
{
+ QVirtioPCIDevice *dev;
+ QPCIBus *bus;
+ int n_size = TEST_IMAGE_SIZE / 2;
+ void *addr;
+ uint64_t capacity;
+
+ bus = test_start();
+
+ dev = virtio_blk_init(bus);
+
+ /* MSI-X is not enabled */
+ addr = dev->addr + QVIRTIO_DEVICE_SPECIFIC_NO_MSIX;
+
+ capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
+ g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
+
+ qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);
+
+ qmp("{ 'execute': 'block_resize', 'arguments': { 'device': 'drive0', "
+ " 'size': %d } }", n_size);
+ g_assert(qvirtio_wait_config_isr(&qvirtio_pci, &dev->vdev,
+ QVIRTIO_BLK_TIMEOUT));
+
+ capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
+ g_assert_cmpint(capacity, ==, n_size / 512);
+
+ qvirtio_pci_device_disable(dev);
+ g_free(dev);
+ test_end();
+}
+
+static void pci_msix(void)
+{
+ QVirtioPCIDevice *dev;
+ QPCIBus *bus;
+ QVirtQueuePCI *vqpci;
+ QGuestAllocator *alloc;
+ QVirtioBlkReq req;
+ int n_size = TEST_IMAGE_SIZE / 2;
+ void *addr;
+ uint64_t req_addr;
+ uint64_t capacity;
+ uint32_t features;
+ uint32_t free_head;
+ uint8_t status;
+ char *data;
+
+ bus = test_start();
+ alloc = pc_alloc_init();
+
+ dev = virtio_blk_init(bus);
+ qpci_msix_enable(dev->pdev);
+
+ qvirtio_pci_set_msix_configuration_vector(dev, alloc, 0);
+
+ /* MSI-X is enabled */
+ addr = dev->addr + QVIRTIO_DEVICE_SPECIFIC_MSIX;
+
+ capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
+ g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
+
+ features = qvirtio_get_features(&qvirtio_pci, &dev->vdev);
+ features = features & ~(QVIRTIO_F_BAD_FEATURE |
+ QVIRTIO_F_RING_INDIRECT_DESC |
+ QVIRTIO_F_RING_EVENT_IDX | QVIRTIO_BLK_F_SCSI);
+ qvirtio_set_features(&qvirtio_pci, &dev->vdev, features);
+
+ vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev,
+ alloc, 0);
+ qvirtqueue_pci_msix_setup(dev, vqpci, alloc, 1);
+
+ qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);
+
+ qmp("{ 'execute': 'block_resize', 'arguments': { 'device': 'drive0', "
+ " 'size': %d } }", n_size);
+
+ g_assert(qvirtio_wait_config_isr(&qvirtio_pci, &dev->vdev,
+ QVIRTIO_BLK_TIMEOUT));
+
+ capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
+ g_assert_cmpint(capacity, ==, n_size / 512);
+
+ /* Write request */
+ req.type = QVIRTIO_BLK_T_OUT;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+ strcpy(req.data, "TEST");
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 528, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ guest_free(alloc, req_addr);
+
+ /* Read request */
+ req.type = QVIRTIO_BLK_T_IN;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 16, 513, true, false);
+
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ data = g_malloc0(512);
+ memread(req_addr + 16, data, 512);
+ g_assert_cmpstr(data, ==, "TEST");
+ g_free(data);
+
+ guest_free(alloc, req_addr);
+
+ /* End test */
+ guest_free(alloc, (uint64_t)vqpci->vq.desc);
+ qpci_msix_disable(dev->pdev);
+ qvirtio_pci_device_disable(dev);
+ g_free(dev);
+ test_end();
+}
+
+static void pci_idx(void)
+{
+ QVirtioPCIDevice *dev;
+ QPCIBus *bus;
+ QVirtQueuePCI *vqpci;
+ QGuestAllocator *alloc;
+ QVirtioBlkReq req;
+ void *addr;
+ uint64_t req_addr;
+ uint64_t capacity;
+ uint32_t features;
+ uint32_t free_head;
+ uint8_t status;
+ char *data;
+
+ bus = test_start();
+ alloc = pc_alloc_init();
+
+ dev = virtio_blk_init(bus);
+ qpci_msix_enable(dev->pdev);
+
+ qvirtio_pci_set_msix_configuration_vector(dev, alloc, 0);
+
+ /* MSI-X is enabled */
+ addr = dev->addr + QVIRTIO_DEVICE_SPECIFIC_MSIX;
+
+ capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr);
+ g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
+
+ features = qvirtio_get_features(&qvirtio_pci, &dev->vdev);
+ features = features & ~(QVIRTIO_F_BAD_FEATURE |
+ QVIRTIO_F_RING_INDIRECT_DESC |
+ QVIRTIO_F_NOTIFY_ON_EMPTY | QVIRTIO_BLK_F_SCSI);
+ qvirtio_set_features(&qvirtio_pci, &dev->vdev, features);
+
+ vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev,
+ alloc, 0);
+ qvirtqueue_pci_msix_setup(dev, vqpci, alloc, 1);
+
+ qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);
+
+ /* Write request */
+ req.type = QVIRTIO_BLK_T_OUT;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+ strcpy(req.data, "TEST");
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 528, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+
+ /* Write request */
+ req.type = QVIRTIO_BLK_T_OUT;
+ req.ioprio = 1;
+ req.sector = 1;
+ req.data = g_malloc0(512);
+ strcpy(req.data, "TEST");
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ /* Notify after processing the third request */
+ qvirtqueue_set_used_event(&vqpci->vq, 2);
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 528, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+ /* No notification expected */
+ g_assert(!qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ guest_free(alloc, req_addr);
+
+ /* Read request */
+ req.type = QVIRTIO_BLK_T_IN;
+ req.ioprio = 1;
+ req.sector = 1;
+ req.data = g_malloc0(512);
+
+ req_addr = virtio_blk_request(alloc, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
+ qvirtqueue_add(&vqpci->vq, req_addr + 16, 513, true, false);
+
+ qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
+
+
+ g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
+ QVIRTIO_BLK_TIMEOUT));
+
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ data = g_malloc0(512);
+ memread(req_addr + 16, data, 512);
+ g_assert_cmpstr(data, ==, "TEST");
+ g_free(data);
+
+ guest_free(alloc, req_addr);
+
+ /* End test */
+ guest_free(alloc, vqpci->vq.desc);
+ qpci_msix_disable(dev->pdev);
+ qvirtio_pci_device_disable(dev);
+ g_free(dev);
+ test_end();
}
int main(int argc, char **argv)
@@ -22,13 +645,14 @@ int main(int argc, char **argv)
int ret;
g_test_init(&argc, &argv, NULL);
- qtest_add_func("/virtio/blk/pci/nop", pci_nop);
- qtest_start("-drive id=drv0,if=none,file=/dev/null "
- "-device virtio-blk-pci,drive=drv0");
- ret = g_test_run();
+ g_test_add_func("/virtio/blk/pci/basic", pci_basic);
+ g_test_add_func("/virtio/blk/pci/indirect", pci_indirect);
+ g_test_add_func("/virtio/blk/pci/config", pci_config);
+ g_test_add_func("/virtio/blk/pci/msix", pci_msix);
+ g_test_add_func("/virtio/blk/pci/idx", pci_idx);
- qtest_end();
+ ret = g_test_run();
return ret;
}
diff --git a/trace-events b/trace-events
index 03ac5d205c..fb58963ca8 100644
--- a/trace-events
+++ b/trace-events
@@ -1045,7 +1045,7 @@ console_txt_new(int w, int h) "%dx%d"
console_select(int nr) "%d"
console_refresh(int interval) "interval %d ms"
displaysurface_create(void *display_surface, int w, int h) "surface=%p, %dx%d"
-displaysurface_create_from(void *display_surface, int w, int h, int bpp, int swap) "surface=%p, %dx%d, bpp %d, bswap %d"
+displaysurface_create_from(void *display_surface, int w, int h, uint32_t format) "surface=%p, %dx%d, format 0x%x"
displaysurface_free(void *display_surface) "surface=%p"
displaychangelistener_register(void *dcl, const char *name) "%p [ %s ]"
displaychangelistener_unregister(void *dcl, const char *name) "%p [ %s ]"
diff --git a/ui/console.c b/ui/console.c
index ab8454903c..5d73d811c0 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -28,6 +28,7 @@
#include "qmp-commands.h"
#include "sysemu/char.h"
#include "trace.h"
+#include "exec/memory.h"
#define DEFAULT_BACKSCROLL 512
#define CONSOLE_CURSOR_PERIOD 500
@@ -1224,61 +1225,77 @@ static QemuConsole *new_console(DisplayState *ds, console_type_t console_type,
return s;
}
-static void qemu_alloc_display(DisplaySurface *surface, int width, int height,
- int linesize, PixelFormat pf, int newflags)
+static void qemu_alloc_display(DisplaySurface *surface, int width, int height)
{
- surface->pf = pf;
-
qemu_pixman_image_unref(surface->image);
surface->image = NULL;
- surface->format = qemu_pixman_get_format(&pf);
- assert(surface->format != 0);
+ surface->format = PIXMAN_x8r8g8b8;
surface->image = pixman_image_create_bits(surface->format,
width, height,
- NULL, linesize);
+ NULL, width * 4);
assert(surface->image != NULL);
- surface->flags = newflags | QEMU_ALLOCATED_FLAG;
-#ifdef HOST_WORDS_BIGENDIAN
- surface->flags |= QEMU_BIG_ENDIAN_FLAG;
-#endif
+ surface->flags = QEMU_ALLOCATED_FLAG;
}
DisplaySurface *qemu_create_displaysurface(int width, int height)
{
DisplaySurface *surface = g_new0(DisplaySurface, 1);
- int linesize = width * 4;
trace_displaysurface_create(surface, width, height);
- qemu_alloc_display(surface, width, height, linesize,
- qemu_default_pixelformat(32), 0);
+ qemu_alloc_display(surface, width, height);
return surface;
}
-DisplaySurface *qemu_create_displaysurface_from(int width, int height, int bpp,
- int linesize, uint8_t *data,
- bool byteswap)
+DisplaySurface *qemu_create_displaysurface_from(int width, int height,
+ pixman_format_code_t format,
+ int linesize, uint8_t *data)
{
DisplaySurface *surface = g_new0(DisplaySurface, 1);
- trace_displaysurface_create_from(surface, width, height, bpp, byteswap);
- if (byteswap) {
- surface->pf = qemu_different_endianness_pixelformat(bpp);
- } else {
- surface->pf = qemu_default_pixelformat(bpp);
- }
-
- surface->format = qemu_pixman_get_format(&surface->pf);
- assert(surface->format != 0);
+ trace_displaysurface_create_from(surface, width, height, format);
+ surface->format = format;
surface->image = pixman_image_create_bits(surface->format,
width, height,
(void *)data, linesize);
assert(surface->image != NULL);
-#ifdef HOST_WORDS_BIGENDIAN
- surface->flags = QEMU_BIG_ENDIAN_FLAG;
-#endif
+ return surface;
+}
+
+static void qemu_unmap_displaysurface_guestmem(pixman_image_t *image,
+ void *unused)
+{
+ void *data = pixman_image_get_data(image);
+ uint32_t size = pixman_image_get_stride(image) *
+ pixman_image_get_height(image);
+ cpu_physical_memory_unmap(data, size, 0, 0);
+}
+
+DisplaySurface *qemu_create_displaysurface_guestmem(int width, int height,
+ pixman_format_code_t format,
+ int linesize, uint64_t addr)
+{
+ DisplaySurface *surface;
+ hwaddr size;
+ void *data;
+
+ if (linesize == 0) {
+ linesize = width * PIXMAN_FORMAT_BPP(format) / 8;
+ }
+
+ size = linesize * height;
+ data = cpu_physical_memory_map(addr, &size, 0);
+ if (size != linesize * height) {
+ cpu_physical_memory_unmap(data, size, 0, 0);
+ return NULL;
+ }
+
+ surface = qemu_create_displaysurface_from
+ (width, height, format, linesize, data);
+ pixman_image_set_destroy_function
+ (surface->image, qemu_unmap_displaysurface_guestmem, NULL);
return surface;
}
@@ -1557,6 +1574,67 @@ bool dpy_cursor_define_supported(QemuConsole *con)
return false;
}
+/*
+ * Call dpy_gfx_update for all dirty scanlines. Works for
+ * DisplaySurfaces backed by guest memory (i.e. the ones created
+ * using qemu_create_displaysurface_guestmem).
+ */
+void dpy_gfx_update_dirty(QemuConsole *con,
+ MemoryRegion *address_space,
+ hwaddr base,
+ bool invalidate)
+{
+ DisplaySurface *ds = qemu_console_surface(con);
+ int width = surface_stride(ds);
+ int height = surface_height(ds);
+ hwaddr size = width * height;
+ MemoryRegionSection mem_section;
+ MemoryRegion *mem;
+ ram_addr_t addr;
+ int first, last, i;
+ bool dirty;
+
+ mem_section = memory_region_find(address_space, base, size);
+ mem = mem_section.mr;
+ if (int128_get64(mem_section.size) != size ||
+ !memory_region_is_ram(mem_section.mr)) {
+ goto out;
+ }
+ assert(mem);
+
+ memory_region_sync_dirty_bitmap(mem);
+ addr = mem_section.offset_within_region;
+
+ first = -1;
+ last = -1;
+ for (i = 0; i < height; i++, addr += width) {
+ dirty = invalidate ||
+ memory_region_get_dirty(mem, addr, width, DIRTY_MEMORY_VGA);
+ if (dirty) {
+ if (first == -1) {
+ first = i;
+ }
+ last = i;
+ }
+ if (first != -1 && !dirty) {
+ assert(last != -1 && last >= first);
+ dpy_gfx_update(con, 0, first, surface_width(ds),
+ last - first + 1);
+ first = -1;
+ }
+ }
+ if (first != -1) {
+ assert(last != -1 && last >= first);
+ dpy_gfx_update(con, 0, first, surface_width(ds),
+ last - first + 1);
+ }
+
+ memory_region_reset_dirty(mem, mem_section.offset_within_region, size,
+ DIRTY_MEMORY_VGA);
+out:
+ memory_region_unref(mem);
+}
+
/***********************************************************/
/* register display */
@@ -1902,124 +1980,15 @@ DisplayState *qemu_console_displaystate(QemuConsole *console)
PixelFormat qemu_different_endianness_pixelformat(int bpp)
{
- PixelFormat pf;
-
- memset(&pf, 0x00, sizeof(PixelFormat));
-
- pf.bits_per_pixel = bpp;
- pf.bytes_per_pixel = DIV_ROUND_UP(bpp, 8);
- pf.depth = bpp == 32 ? 24 : bpp;
-
- switch (bpp) {
- case 24:
- pf.rmask = 0x000000FF;
- pf.gmask = 0x0000FF00;
- pf.bmask = 0x00FF0000;
- pf.rmax = 255;
- pf.gmax = 255;
- pf.bmax = 255;
- pf.rshift = 0;
- pf.gshift = 8;
- pf.bshift = 16;
- pf.rbits = 8;
- pf.gbits = 8;
- pf.bbits = 8;
- break;
- case 32:
- pf.rmask = 0x0000FF00;
- pf.gmask = 0x00FF0000;
- pf.bmask = 0xFF000000;
- pf.amask = 0x00000000;
- pf.amax = 255;
- pf.rmax = 255;
- pf.gmax = 255;
- pf.bmax = 255;
- pf.ashift = 0;
- pf.rshift = 8;
- pf.gshift = 16;
- pf.bshift = 24;
- pf.rbits = 8;
- pf.gbits = 8;
- pf.bbits = 8;
- pf.abits = 8;
- break;
- default:
- break;
- }
+ pixman_format_code_t fmt = qemu_default_pixman_format(bpp, false);
+ PixelFormat pf = qemu_pixelformat_from_pixman(fmt);
return pf;
}
PixelFormat qemu_default_pixelformat(int bpp)
{
- PixelFormat pf;
-
- memset(&pf, 0x00, sizeof(PixelFormat));
-
- pf.bits_per_pixel = bpp;
- pf.bytes_per_pixel = DIV_ROUND_UP(bpp, 8);
- pf.depth = bpp == 32 ? 24 : bpp;
-
- switch (bpp) {
- case 15:
- pf.bits_per_pixel = 16;
- pf.rmask = 0x00007c00;
- pf.gmask = 0x000003E0;
- pf.bmask = 0x0000001F;
- pf.rmax = 31;
- pf.gmax = 31;
- pf.bmax = 31;
- pf.rshift = 10;
- pf.gshift = 5;
- pf.bshift = 0;
- pf.rbits = 5;
- pf.gbits = 5;
- pf.bbits = 5;
- break;
- case 16:
- pf.rmask = 0x0000F800;
- pf.gmask = 0x000007E0;
- pf.bmask = 0x0000001F;
- pf.rmax = 31;
- pf.gmax = 63;
- pf.bmax = 31;
- pf.rshift = 11;
- pf.gshift = 5;
- pf.bshift = 0;
- pf.rbits = 5;
- pf.gbits = 6;
- pf.bbits = 5;
- break;
- case 24:
- pf.rmask = 0x00FF0000;
- pf.gmask = 0x0000FF00;
- pf.bmask = 0x000000FF;
- pf.rmax = 255;
- pf.gmax = 255;
- pf.bmax = 255;
- pf.rshift = 16;
- pf.gshift = 8;
- pf.bshift = 0;
- pf.rbits = 8;
- pf.gbits = 8;
- pf.bbits = 8;
- break;
- case 32:
- pf.rmask = 0x00FF0000;
- pf.gmask = 0x0000FF00;
- pf.bmask = 0x000000FF;
- pf.rmax = 255;
- pf.gmax = 255;
- pf.bmax = 255;
- pf.rshift = 16;
- pf.gshift = 8;
- pf.bshift = 0;
- pf.rbits = 8;
- pf.gbits = 8;
- pf.bbits = 8;
- break;
- default:
- break;
- }
+ pixman_format_code_t fmt = qemu_default_pixman_format(bpp, true);
+ PixelFormat pf = qemu_pixelformat_from_pixman(fmt);
return pf;
}
diff --git a/ui/qemu-pixman.c b/ui/qemu-pixman.c
index 254bd8ce10..30c7fdd011 100644
--- a/ui/qemu-pixman.c
+++ b/ui/qemu-pixman.c
@@ -6,6 +6,87 @@
#include "qemu-common.h"
#include "ui/console.h"
+PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format)
+{
+ PixelFormat pf;
+ uint8_t bpp;
+
+ bpp = pf.bits_per_pixel = PIXMAN_FORMAT_BPP(format);
+ pf.bytes_per_pixel = PIXMAN_FORMAT_BPP(format) / 8;
+ pf.depth = PIXMAN_FORMAT_DEPTH(format);
+
+ pf.abits = PIXMAN_FORMAT_A(format);
+ pf.rbits = PIXMAN_FORMAT_R(format);
+ pf.gbits = PIXMAN_FORMAT_G(format);
+ pf.bbits = PIXMAN_FORMAT_B(format);
+
+ switch (PIXMAN_FORMAT_TYPE(format)) {
+ case PIXMAN_TYPE_ARGB:
+ pf.ashift = pf.bbits + pf.gbits + pf.rbits;
+ pf.rshift = pf.bbits + pf.gbits;
+ pf.gshift = pf.bbits;
+ pf.bshift = 0;
+ break;
+ case PIXMAN_TYPE_ABGR:
+ pf.ashift = pf.rbits + pf.gbits + pf.bbits;
+ pf.bshift = pf.rbits + pf.gbits;
+ pf.gshift = pf.rbits;
+ pf.rshift = 0;
+ break;
+ case PIXMAN_TYPE_BGRA:
+ pf.bshift = bpp - pf.bbits;
+ pf.gshift = bpp - (pf.bbits + pf.gbits);
+ pf.rshift = bpp - (pf.bbits + pf.gbits + pf.rbits);
+ pf.ashift = 0;
+ break;
+ case PIXMAN_TYPE_RGBA:
+ pf.rshift = bpp - pf.rbits;
+ pf.gshift = bpp - (pf.rbits + pf.gbits);
+ pf.bshift = bpp - (pf.rbits + pf.gbits + pf.bbits);
+ pf.ashift = 0;
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+
+ pf.amax = (1 << pf.abits) - 1;
+ pf.rmax = (1 << pf.rbits) - 1;
+ pf.gmax = (1 << pf.gbits) - 1;
+ pf.bmax = (1 << pf.bbits) - 1;
+ pf.amask = pf.amax << pf.ashift;
+ pf.rmask = pf.rmax << pf.rshift;
+ pf.gmask = pf.gmax << pf.gshift;
+ pf.bmask = pf.bmax << pf.bshift;
+
+ return pf;
+}
+
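The shift arithmetic above is easiest to sanity-check against a concrete format. For PIXMAN_x8r8g8b8 (the native-endian 32 bpp default), the helper should reproduce exactly the masks that the hand-rolled table in qemu_default_pixelformat() used to spell out. A throwaway self-check, for illustration only and not part of the patch:

    #include <assert.h>

    static void example_check_x8r8g8b8(void)
    {
        PixelFormat pf = qemu_pixelformat_from_pixman(PIXMAN_x8r8g8b8);

        assert(pf.bits_per_pixel == 32 && pf.depth == 24);
        assert(pf.rshift == 16 && pf.gshift == 8 && pf.bshift == 0);
        assert(pf.rmask == 0x00FF0000 && pf.gmask == 0x0000FF00 &&
               pf.bmask == 0x000000FF);
        assert(pf.amask == 0);   /* x8: zero alpha bits, so zero mask */
    }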
+pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian)
+{
+ if (native_endian) {
+ switch (bpp) {
+ case 15:
+ return PIXMAN_x1r5g5b5;
+ case 16:
+ return PIXMAN_r5g6b5;
+ case 24:
+ return PIXMAN_r8g8b8;
+ case 32:
+ return PIXMAN_x8r8g8b8;
+ }
+ } else {
+ switch (bpp) {
+ case 24:
+ return PIXMAN_b8g8r8;
+ case 32:
+ return PIXMAN_b8g8r8a8;
+ break;
+ }
+ }
+ g_assert_not_reached();
+}
+
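qemu_default_pixman_format() is the lookup that lets device emulation stop building PixelFormat tables by hand: given the guest's bits per pixel and whether the framebuffer layout matches host byte order, it returns the pixman code to hand to the surface constructors. A hedged usage sketch; the wrapper below is illustrative, not something this patch adds:

    /* build a surface for a guest framebuffer laid out in host byte order;
     * bpp must be one of 15/16/24/32 here or the lookup asserts */
    static DisplaySurface *example_guest_surface(int width, int height,
                                                 int bpp, int stride,
                                                 uint8_t *fb_data)
    {
        pixman_format_code_t format = qemu_default_pixman_format(bpp, true);

        return qemu_create_displaysurface_from(width, height, format,
                                               stride, fb_data);
    }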
int qemu_pixman_get_type(int rshift, int gshift, int bshift)
{
int type = PIXMAN_TYPE_OTHER;
@@ -52,6 +133,7 @@ pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format,
return image;
}
+/* fill linebuf from framebuffer */
void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
int width, int x, int y)
{
@@ -59,6 +141,14 @@ void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
x, y, 0, 0, 0, 0, width, 1);
}
+/* copy linebuf to framebuffer */
+void qemu_pixman_linebuf_copy(pixman_image_t *fb, int width, int x, int y,
+ pixman_image_t *linebuf)
+{
+ pixman_image_composite(PIXMAN_OP_SRC, linebuf, NULL, fb,
+ 0, 0, 0, 0, x, y, width, 1);
+}
+
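Together with qemu_pixman_linebuf_fill() above, this gives a symmetric pair for converting pixel data one scanline at a time: fill pulls a row out of a framebuffer image into a linebuf (converting to the linebuf's format), copy pushes a linebuf back into a framebuffer row (converting to the framebuffer's format). A sketch of the copy direction, assuming an input row of x8r8g8b8 pixels; the wrapper is illustrative only:

    /* convert one scanline of x8r8g8b8 input into whatever format the
     * target framebuffer image uses and land it at row y, column 0 */
    static void example_copy_row(pixman_image_t *fb, int width, int y,
                                 uint32_t *src_row)
    {
        pixman_image_t *linebuf =
            pixman_image_create_bits(PIXMAN_x8r8g8b8, width, 1,
                                     src_row, width * sizeof(uint32_t));

        qemu_pixman_linebuf_copy(fb, width, 0, y, linebuf);
        pixman_image_unref(linebuf);
    }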
pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format,
pixman_image_t *image)
{
diff --git a/ui/sdl.c b/ui/sdl.c
index 4e7f920e37..94c1d9dc3a 100644
--- a/ui/sdl.c
+++ b/ui/sdl.c
@@ -127,6 +127,7 @@ static void do_sdl_resize(int width, int height, int bpp)
static void sdl_switch(DisplayChangeListener *dcl,
DisplaySurface *new_surface)
{
+ PixelFormat pf = qemu_pixelformat_from_pixman(new_surface->format);
/* temporary hack: allows to call sdl_switch to handle scaling changes */
if (new_surface) {
@@ -148,8 +149,8 @@ static void sdl_switch(DisplayChangeListener *dcl,
(surface_data(surface),
surface_width(surface), surface_height(surface),
surface_bits_per_pixel(surface), surface_stride(surface),
- surface->pf.rmask, surface->pf.gmask,
- surface->pf.bmask, surface->pf.amask);
+ pf.rmask, pf.gmask,
+ pf.bmask, pf.amask);
}
/* generic keyboard conversion */
diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c
index f02352cc46..3d1b5cd066 100644
--- a/ui/vnc-enc-tight.c
+++ b/ui/vnc-enc-tight.c
@@ -220,8 +220,7 @@ tight_detect_smooth_image24(VncState *vs, int w, int h)
unsigned int errors; \
unsigned char *buf = vs->tight.tight.buffer; \
\
- endian = 0; /* FIXME: ((vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG) != \
- (vs->ds->surface->flags & QEMU_BIG_ENDIAN_FLAG)); */ \
+ endian = 0; /* FIXME */ \
\
\
max[0] = vs->client_pf.rmax; \
@@ -563,8 +562,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h)
buf32 = (uint32_t *)buf;
memset(vs->tight.gradient.buffer, 0, w * 3 * sizeof(int));
- if (1 /* FIXME: (vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG) ==
- (vs->ds->surface->flags & QEMU_BIG_ENDIAN_FLAG) */) {
+ if (1 /* FIXME */) {
shift[0] = vs->client_pf.rshift;
shift[1] = vs->client_pf.gshift;
shift[2] = vs->client_pf.bshift;
@@ -621,8 +619,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h)
\
memset (vs->tight.gradient.buffer, 0, w * 3 * sizeof(int)); \
\
- endian = 0; /* FIXME: ((vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG) != \
- (vs->ds->surface->flags & QEMU_BIG_ENDIAN_FLAG)); */ \
+ endian = 0; /* FIXME */ \
\
max[0] = vs->client_pf.rmax; \
max[1] = vs->client_pf.gmax; \
@@ -898,8 +895,7 @@ static void tight_pack24(VncState *vs, uint8_t *buf, size_t count, size_t *ret)
buf32 = (uint32_t *)buf;
- if (1 /* FIXME: (vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG) ==
- (vs->ds->surface->flags & QEMU_BIG_ENDIAN_FLAG) */) {
+ if (1 /* FIXME */) {
rshift = vs->client_pf.rshift;
gshift = vs->client_pf.gshift;
bshift = vs->client_pf.bshift;