47 files changed, 1159 insertions, 373 deletions
@@ -5028,7 +5028,7 @@ case "$target_name" in
 aarch64)
   TARGET_BASE_ARCH=arm
   bflt="yes"
-  gdb_xml_files="aarch64-core.xml aarch64-fpu.xml"
+  gdb_xml_files="aarch64-core.xml aarch64-fpu.xml arm-core.xml arm-vfp.xml arm-vfp3.xml arm-neon.xml"
 ;;
 cris)
 ;;
diff --git a/disas/sparc.c b/disas/sparc.c
index 8eb22e6fc3..8e755d1ba2 100644
--- a/disas/sparc.c
+++ b/disas/sparc.c
@@ -1175,15 +1175,11 @@ static const struct sparc_opcode sparc_opcodes[] = {
 { "subcc", F3(2, 0x14, 0), F3(~2, ~0x14, ~0)|ASI(~0), "1,2,d", 0, v6 },
 { "subcc", F3(2, 0x14, 1), F3(~2, ~0x14, ~1), "1,i,d", 0, v6 },

-{ "subx", F3(2, 0x0c, 0), F3(~2, ~0x0c, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
-{ "subx", F3(2, 0x0c, 1), F3(~2, ~0x0c, ~1), "1,i,d", 0, v6notv9 },
-{ "subc", F3(2, 0x0c, 0), F3(~2, ~0x0c, ~0)|ASI(~0), "1,2,d", 0, v9 },
-{ "subc", F3(2, 0x0c, 1), F3(~2, ~0x0c, ~1), "1,i,d", 0, v9 },
+{ "subc", F3(2, 0x0c, 0), F3(~2, ~0x0c, ~0)|ASI(~0), "1,2,d", 0, v6 },
+{ "subc", F3(2, 0x0c, 1), F3(~2, ~0x0c, ~1), "1,i,d", 0, v6 },

-{ "subxcc", F3(2, 0x1c, 0), F3(~2, ~0x1c, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
-{ "subxcc", F3(2, 0x1c, 1), F3(~2, ~0x1c, ~1), "1,i,d", 0, v6notv9 },
-{ "subccc", F3(2, 0x1c, 0), F3(~2, ~0x1c, ~0)|ASI(~0), "1,2,d", 0, v9 },
-{ "subccc", F3(2, 0x1c, 1), F3(~2, ~0x1c, ~1), "1,i,d", 0, v9 },
+{ "subccc", F3(2, 0x1c, 0), F3(~2, ~0x1c, ~0)|ASI(~0), "1,2,d", 0, v6 },
+{ "subccc", F3(2, 0x1c, 1), F3(~2, ~0x1c, ~1), "1,i,d", 0, v6 },

 { "and", F3(2, 0x01, 0), F3(~2, ~0x01, ~0)|ASI(~0), "1,2,d", 0, v6 },
 { "and", F3(2, 0x01, 1), F3(~2, ~0x01, ~1), "1,i,d", 0, v6 },
@@ -1215,19 +1211,13 @@
 { "addcc", F3(2, 0x10, 1), F3(~2, ~0x10, ~1), "1,i,d", 0, v6 },
 { "addcc", F3(2, 0x10, 1), F3(~2, ~0x10, ~1), "i,1,d", 0, v6 },

-{ "addx", F3(2, 0x08, 0), F3(~2, ~0x08, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
-{ "addx", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "1,i,d", 0, v6notv9 },
-{ "addx", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "i,1,d", 0, v6notv9 },
-{ "addc", F3(2, 0x08, 0), F3(~2, ~0x08, ~0)|ASI(~0), "1,2,d", 0, v9 },
-{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "1,i,d", 0, v9 },
-{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "i,1,d", 0, v9 },
+{ "addc", F3(2, 0x08, 0), F3(~2, ~0x08, ~0)|ASI(~0), "1,2,d", 0, v6 },
+{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "1,i,d", 0, v6 },
+{ "addc", F3(2, 0x08, 1), F3(~2, ~0x08, ~1), "i,1,d", 0, v6 },

-{ "addxcc", F3(2, 0x18, 0), F3(~2, ~0x18, ~0)|ASI(~0), "1,2,d", 0, v6notv9 },
-{ "addxcc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "1,i,d", 0, v6notv9 },
-{ "addxcc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "i,1,d", 0, v6notv9 },
-{ "addccc", F3(2, 0x18, 0), F3(~2, ~0x18, ~0)|ASI(~0), "1,2,d", 0, v9 },
-{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "1,i,d", 0, v9 },
-{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "i,1,d", 0, v9 },
+{ "addccc", F3(2, 0x18, 0), F3(~2, ~0x18, ~0)|ASI(~0), "1,2,d", 0, v6 },
+{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "1,i,d", 0, v6 },
+{ "addccc", F3(2, 0x18, 1), F3(~2, ~0x18, ~1), "i,1,d", 0, v6 },

 { "smul", F3(2, 0x0b, 0), F3(~2, ~0x0b, ~0)|ASI(~0), "1,2,d", 0, v8 },
 { "smul", F3(2, 0x0b, 1), F3(~2, ~0x0b, ~1), "1,i,d", 0, v8 },
@@ -2042,6 +2032,10 @@ IMPDEP ("impdep2", 0x37),

 #undef IMPDEP

+{ "addxc", F3F(2, 0x36, 0x011), F3F(~2, ~0x36, ~0x011), "1,2,d", 0, v9b },
+{ "addxccc", F3F(2, 0x36, 0x013), F3F(~2, ~0x36, ~0x013), "1,2,d", 0, v9b },
+{ "umulxhi", F3F(2, 0x36, 0x016), F3F(~2, ~0x36, ~0x016), "1,2,d", 0, v9b },
+
 };

 static const int sparc_num_opcodes = ((sizeof sparc_opcodes)/(sizeof sparc_opcodes[0]));
diff --git a/hmp-commands.hx b/hmp-commands.hx
index f859f8d29f..0b1a4f778a 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1778,6 +1778,8 @@ show qdev device model list
 show roms
 @item info tpm
 show the TPM device
+@item info memory-devices
+show the memory devices
 @end table
 ETEXI

@@ -1720,3 +1720,41 @@ void hmp_info_memdev(Monitor *mon, const QDict *qdict)

     qapi_free_MemdevList(memdev_list);
 }
+
+void hmp_info_memory_devices(Monitor *mon, const QDict *qdict)
+{
+    Error *err = NULL;
+    MemoryDeviceInfoList *info_list = qmp_query_memory_devices(&err);
+    MemoryDeviceInfoList *info;
+    MemoryDeviceInfo *value;
+    PCDIMMDeviceInfo *di;
+
+    for (info = info_list; info; info = info->next) {
+        value = info->value;
+
+        if (value) {
+            switch (value->kind) {
+            case MEMORY_DEVICE_INFO_KIND_DIMM:
+                di = value->dimm;
+
+                monitor_printf(mon, "Memory device [%s]: \"%s\"\n",
+                               MemoryDeviceInfoKind_lookup[value->kind],
+                               di->id ? di->id : "");
+                monitor_printf(mon, "  addr: 0x%" PRIx64 "\n", di->addr);
+                monitor_printf(mon, "  slot: %" PRId64 "\n", di->slot);
+                monitor_printf(mon, "  node: %" PRId64 "\n", di->node);
+                monitor_printf(mon, "  size: %" PRIu64 "\n", di->size);
+                monitor_printf(mon, "  memdev: %s\n", di->memdev);
+                monitor_printf(mon, "  hotplugged: %s\n",
+                               di->hotplugged ? "true" : "false");
+                monitor_printf(mon, "  hotpluggable: %s\n",
+                               di->hotpluggable ? "true" : "false");
+                break;
+            default:
+                break;
+            }
+        }
+    }
+
+    qapi_free_MemoryDeviceInfoList(info_list);
+}
@@ -94,6 +94,7 @@ void hmp_cpu_add(Monitor *mon, const QDict *qdict);
 void hmp_object_add(Monitor *mon, const QDict *qdict);
 void hmp_object_del(Monitor *mon, const QDict *qdict);
 void hmp_info_memdev(Monitor *mon, const QDict *qdict);
+void hmp_info_memory_devices(Monitor *mon, const QDict *qdict);
 void object_add_completion(ReadLineState *rs, int nb_args, const char *str);
 void object_del_completion(ReadLineState *rs, int nb_args, const char *str);
 void device_add_completion(ReadLineState *rs, int nb_args, const char *str);
diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c
index 55c0ddf00b..92b1fac563 100644
--- a/hw/display/blizzard.c
+++ b/hw/display/blizzard.c
@@ -134,14 +134,6 @@ static const int blizzard_iformat_bpp[0x10] = {
     0, 0, 0, 0, 0, 0,
 };

-static inline void blizzard_rgb2yuv(int r, int g, int b,
-                                    int *y, int *u, int *v)
-{
-    *y = 0x10 + ((0x838 * r + 0x1022 * g + 0x322 * b) >> 13);
-    *u = 0x80 + ((0xe0e * b - 0x04c1 * r - 0x94e * g) >> 13);
-    *v = 0x80 + ((0xe0e * r - 0x0bc7 * g - 0x247 * b) >> 13);
-}
-
 static void blizzard_window(BlizzardState *s)
 {
     DisplaySurface *surface = qemu_console_surface(s->con);
diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c
index 611fb174cd..ac3c018822 100644
--- a/hw/display/pxa2xx_lcd.c
+++ b/hw/display/pxa2xx_lcd.c
@@ -279,14 +279,6 @@ static inline void pxa2xx_dma_ber_set(PXA2xxLCDState *s, int ch)
     s->liidr = s->dma_ch[ch].id;
 }

-/* Set Read Status interrupt high and poke associated registers */
-static inline void pxa2xx_dma_rdst_set(PXA2xxLCDState *s)
-{
-    s->status[0] |= LCSR0_RDST;
-    if (s->irqlevel && !(s->control[0] & LCCR0_RDSTM))
-        s->status[0] |= LCSR0_SINT;
-}
-
 /* Load new Frame Descriptors from DMA */
 static void pxa2xx_descriptor_load(PXA2xxLCDState *s)
 {
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 55d13a7ca7..93b3518b21 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -132,6 +132,8 @@ static void qxl_reset_memslots(PCIQXLDevice *d);
 static void qxl_reset_surfaces(PCIQXLDevice *d);
 static void qxl_ring_set_dirty(PCIQXLDevice *qxl);

+static
void qxl_hw_update(void *opaque); + void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) { trace_qxl_set_guest_bug(qxl->id); @@ -1076,6 +1078,10 @@ static const QXLInterface qxl_interface = { .client_monitors_config = interface_client_monitors_config, }; +static const GraphicHwOps qxl_ops = { + .gfx_update = qxl_hw_update, +}; + static void qxl_enter_vga_mode(PCIQXLDevice *d) { if (d->mode == QXL_MODE_VGA) { @@ -1085,6 +1091,7 @@ static void qxl_enter_vga_mode(PCIQXLDevice *d) #if SPICE_SERVER_VERSION >= 0x000c03 /* release 0.12.3 */ spice_qxl_driver_unload(&d->ssd.qxl); #endif + graphic_console_set_hwops(d->ssd.dcl.con, d->vga.hw_ops, &d->vga); qemu_spice_create_host_primary(&d->ssd); d->mode = QXL_MODE_VGA; vga_dirty_log_start(&d->vga); @@ -1097,6 +1104,7 @@ static void qxl_exit_vga_mode(PCIQXLDevice *d) return; } trace_qxl_exit_vga_mode(d->id); + graphic_console_set_hwops(d->ssd.dcl.con, &qxl_ops, d); vga_dirty_log_stop(&d->vga); qxl_destroy_primary(d, QXL_SYNC); } @@ -1756,41 +1764,8 @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events) static void qxl_hw_update(void *opaque) { PCIQXLDevice *qxl = opaque; - VGACommonState *vga = &qxl->vga; - switch (qxl->mode) { - case QXL_MODE_VGA: - vga->hw_ops->gfx_update(vga); - break; - case QXL_MODE_COMPAT: - case QXL_MODE_NATIVE: - qxl_render_update(qxl); - break; - default: - break; - } -} - -static void qxl_hw_invalidate(void *opaque) -{ - PCIQXLDevice *qxl = opaque; - VGACommonState *vga = &qxl->vga; - - if (qxl->mode == QXL_MODE_VGA) { - vga->hw_ops->invalidate(vga); - return; - } -} - -static void qxl_hw_text_update(void *opaque, console_ch_t *chardata) -{ - PCIQXLDevice *qxl = opaque; - VGACommonState *vga = &qxl->vga; - - if (qxl->mode == QXL_MODE_VGA) { - vga->hw_ops->text_update(vga, chardata); - return; - } + qxl_render_update(qxl); } static void qxl_dirty_surfaces(PCIQXLDevice *qxl) @@ -2049,12 +2024,6 @@ static int qxl_init_common(PCIQXLDevice *qxl) return 0; } -static const GraphicHwOps qxl_ops = { - .invalidate = qxl_hw_invalidate, - .gfx_update = qxl_hw_update, - .text_update = qxl_hw_text_update, -}; - static int qxl_init_primary(PCIDevice *dev) { PCIQXLDevice *qxl = DO_UPCAST(PCIQXLDevice, pci, dev); diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c index aa5b6886ea..fae3385636 100644 --- a/hw/input/tsc210x.c +++ b/hw/input/tsc210x.c @@ -215,36 +215,6 @@ typedef struct { int fsref; } TSC210xRateInfo; -/* { rate, dsor, fsref } */ -static const TSC210xRateInfo tsc2101_rates[] = { - /* Fsref / 6.0 */ - { 7350, 7, 1 }, - { 8000, 7, 0 }, - /* Fsref / 5.5 */ - { 8018, 6, 1 }, - { 8727, 6, 0 }, - /* Fsref / 5.0 */ - { 8820, 5, 1 }, - { 9600, 5, 0 }, - /* Fsref / 4.0 */ - { 11025, 4, 1 }, - { 12000, 4, 0 }, - /* Fsref / 3.0 */ - { 14700, 3, 1 }, - { 16000, 3, 0 }, - /* Fsref / 2.0 */ - { 22050, 2, 1 }, - { 24000, 2, 0 }, - /* Fsref / 1.5 */ - { 29400, 1, 1 }, - { 32000, 1, 0 }, - /* Fsref */ - { 44100, 0, 1 }, - { 48000, 0, 0 }, - - { 0, 0, 0 }, -}; - /* { rate, dsor, fsref } */ static const TSC210xRateInfo tsc2102_rates[] = { /* Fsref / 6.0 */ diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c index ec5f9ad815..e48f66c8fa 100644 --- a/hw/intc/imx_avic.c +++ b/hw/intc/imx_avic.c @@ -97,15 +97,6 @@ static inline int imx_avic_prio(IMXAVICState *s, int irq) return 0xf & (s->prio[word] >> part); } -static inline void imx_avic_set_prio(IMXAVICState *s, int irq, int prio) -{ - uint32_t word = irq / PRIO_PER_WORD; - uint32_t part = 4 * (irq % PRIO_PER_WORD); - uint32_t mask = ~(0xf << part); - s->prio[word] &= mask; 
- s->prio[word] |= prio << part; -} - /* Update interrupts. */ static void imx_avic_update(IMXAVICState *s) { diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 2c30b3d8bd..b5cf7cacc0 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -87,7 +87,7 @@ static void balloon_stats_destroy_timer(VirtIOBalloon *s) } } -static void balloon_stats_change_timer(VirtIOBalloon *s, int secs) +static void balloon_stats_change_timer(VirtIOBalloon *s, int64_t secs) { timer_mod(s->stats_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + secs * 1000); } @@ -170,6 +170,11 @@ static void balloon_stats_set_poll_interval(Object *obj, struct Visitor *v, return; } + if (value > UINT_MAX) { + error_setg(errp, "timer value is too big"); + return; + } + if (value == s->stats_poll_interval) { return; } diff --git a/include/elf.h b/include/elf.h index 70107f0c3f..a516584485 100644 --- a/include/elf.h +++ b/include/elf.h @@ -473,14 +473,35 @@ typedef struct { #define PPC_FEATURE_TRUE_LE 0x00000002 #define PPC_FEATURE_PPC_LE 0x00000001 -/* Bits present in AT_HWCAP, primarily for Sparc32. */ - -#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */ -#define HWCAP_SPARC_STBAR 2 -#define HWCAP_SPARC_SWAP 4 -#define HWCAP_SPARC_MULDIV 8 -#define HWCAP_SPARC_V9 16 -#define HWCAP_SPARC_ULTRA3 32 +/* Bits present in AT_HWCAP for Sparc. */ + +#define HWCAP_SPARC_FLUSH 0x00000001 +#define HWCAP_SPARC_STBAR 0x00000002 +#define HWCAP_SPARC_SWAP 0x00000004 +#define HWCAP_SPARC_MULDIV 0x00000008 +#define HWCAP_SPARC_V9 0x00000010 +#define HWCAP_SPARC_ULTRA3 0x00000020 +#define HWCAP_SPARC_BLKINIT 0x00000040 +#define HWCAP_SPARC_N2 0x00000080 +#define HWCAP_SPARC_MUL32 0x00000100 +#define HWCAP_SPARC_DIV32 0x00000200 +#define HWCAP_SPARC_FSMULD 0x00000400 +#define HWCAP_SPARC_V8PLUS 0x00000800 +#define HWCAP_SPARC_POPC 0x00001000 +#define HWCAP_SPARC_VIS 0x00002000 +#define HWCAP_SPARC_VIS2 0x00004000 +#define HWCAP_SPARC_ASI_BLK_INIT 0x00008000 +#define HWCAP_SPARC_FMAF 0x00010000 +#define HWCAP_SPARC_VIS3 0x00020000 +#define HWCAP_SPARC_HPC 0x00040000 +#define HWCAP_SPARC_RANDOM 0x00080000 +#define HWCAP_SPARC_TRANS 0x00100000 +#define HWCAP_SPARC_FJFMAU 0x00200000 +#define HWCAP_SPARC_IMA 0x00400000 +#define HWCAP_SPARC_ASI_CACHE_SPARING 0x00800000 +#define HWCAP_SPARC_PAUSE 0x01000000 +#define HWCAP_SPARC_CBCOND 0x02000000 +#define HWCAP_SPARC_CRYPTO 0x04000000 /* Bits present in AT_HWCAP for s390. 
*/ diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h index 902d1a7a18..0ca6cbd0e6 100644 --- a/include/qapi/qmp/qerror.h +++ b/include/qapi/qmp/qerror.h @@ -154,16 +154,4 @@ void qerror_report_err(Error *err); #define QERR_UNSUPPORTED \ ERROR_CLASS_GENERIC_ERROR, "this feature or command is not currently supported" -#define QERR_SOCKET_CONNECT_FAILED \ - ERROR_CLASS_GENERIC_ERROR, "Failed to connect to socket" - -#define QERR_SOCKET_LISTEN_FAILED \ - ERROR_CLASS_GENERIC_ERROR, "Failed to set socket to listening mode" - -#define QERR_SOCKET_BIND_FAILED \ - ERROR_CLASS_GENERIC_ERROR, "Failed to bind socket" - -#define QERR_SOCKET_CREATE_FAILED \ - ERROR_CLASS_GENERIC_ERROR, "Failed to create socket" - #endif /* QERROR_H */ diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h index ecc0183196..09bb0fd408 100644 --- a/include/qapi/visitor-impl.h +++ b/include/qapi/visitor-impl.h @@ -55,6 +55,8 @@ struct Visitor void (*type_int64)(Visitor *v, int64_t *obj, const char *name, Error **errp); /* visit_type_size() falls back to (*type_uint64)() if type_size is unset */ void (*type_size)(Visitor *v, uint64_t *obj, const char *name, Error **errp); + bool (*start_union)(Visitor *v, bool data_present, Error **errp); + void (*end_union)(Visitor *v, bool data_present, Error **errp); }; void input_type_enum(Visitor *v, int *obj, const char *strings[], diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h index 4a0178fa46..5934f59ad8 100644 --- a/include/qapi/visitor.h +++ b/include/qapi/visitor.h @@ -58,5 +58,7 @@ void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp); void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp); void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp); void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp); +bool visit_start_union(Visitor *v, bool data_present, Error **errp); +void visit_end_union(Visitor *v, bool data_present, Error **errp); #endif diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index 155b358964..ac7c4c441e 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -24,6 +24,12 @@ #define QEMU_WARN_UNUSED_RESULT #endif +#if QEMU_GNUC_PREREQ(4, 3) +#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial)) +#else +#define QEMU_ARTIFICIAL +#endif + #if defined(_WIN32) # define QEMU_PACKED __attribute__((gcc_struct, packed)) #else diff --git a/include/ui/console.h b/include/ui/console.h index cde0faf6e5..22ef8ca6b3 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -292,6 +292,9 @@ typedef struct GraphicHwOps { QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, const GraphicHwOps *ops, void *opaque); +void graphic_console_set_hwops(QemuConsole *con, + const GraphicHwOps *hw_ops, + void *opaque); void graphic_hw_update(QemuConsole *con); void graphic_hw_invalidate(QemuConsole *con); @@ -2922,6 +2922,13 @@ static mon_cmd_t info_cmds[] = { .mhandler.cmd = hmp_info_memdev, }, { + .name = "memory-devices", + .args_type = "", + .params = "", + .help = "show memory devices", + .mhandler.cmd = hmp_info_memory_devices, + }, + { .name = NULL, }, }; @@ -5249,6 +5256,7 @@ static void monitor_event(void *opaque, int event) monitor_printf(mon, "QEMU %s monitor - type 'help' for more " "information\n", QEMU_VERSION); if (!mon->mux_out) { + readline_restart(mon->rs); readline_show_prompt(mon->rs); } mon->reset_seen = 1; diff --git a/qapi/qapi-dealloc-visitor.c 
b/qapi/qapi-dealloc-visitor.c index dc53545fa5..a14a1c7146 100644 --- a/qapi/qapi-dealloc-visitor.c +++ b/qapi/qapi-dealloc-visitor.c @@ -162,6 +162,31 @@ static void qapi_dealloc_type_enum(Visitor *v, int *obj, const char *strings[], { } +/* If there's no data present, the dealloc visitor has nothing to free. + * Thus, indicate to visitor code that the subsequent union fields can + * be skipped. This is not an error condition, since the cleanup of the + * rest of an object can continue unhindered, so leave errp unset in + * these cases. + * + * NOTE: In cases where we're attempting to deallocate an object that + * may have missing fields, the field indicating the union type may + * be missing. In such a case, it's possible we don't have enough + * information to differentiate data_present == false from a case where + * data *is* present but happens to be a scalar with a value of 0. + * This is okay, since in the case of the dealloc visitor there's no + * work that needs to done in either situation. + * + * The current inability in QAPI code to more thoroughly verify a union + * type in such cases will likely need to be addressed if we wish to + * implement this interface for other types of visitors in the future, + * however. + */ +static bool qapi_dealloc_start_union(Visitor *v, bool data_present, + Error **errp) +{ + return data_present; +} + Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v) { return &v->visitor; @@ -191,6 +216,7 @@ QapiDeallocVisitor *qapi_dealloc_visitor_new(void) v->visitor.type_str = qapi_dealloc_type_str; v->visitor.type_number = qapi_dealloc_type_number; v->visitor.type_size = qapi_dealloc_type_size; + v->visitor.start_union = qapi_dealloc_start_union; QTAILQ_INIT(&v->stack); diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c index 55f8d4068c..b66b93ae2b 100644 --- a/qapi/qapi-visit-core.c +++ b/qapi/qapi-visit-core.c @@ -58,6 +58,21 @@ void visit_end_list(Visitor *v, Error **errp) v->end_list(v, errp); } +bool visit_start_union(Visitor *v, bool data_present, Error **errp) +{ + if (v->start_union) { + return v->start_union(v, data_present, errp); + } + return true; +} + +void visit_end_union(Visitor *v, bool data_present, Error **errp) +{ + if (v->end_union) { + v->end_union(v, data_present, errp); + } +} + void visit_optional(Visitor *v, bool *present, const char *name, Error **errp) { diff --git a/scripts/qapi-visit.py b/scripts/qapi-visit.py index df9f7fb657..8f845a2b29 100644 --- a/scripts/qapi-visit.py +++ b/scripts/qapi-visit.py @@ -358,6 +358,9 @@ void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **e if (err) { goto out_obj; } + if (!visit_start_union(m, !!(*obj)->data, &err) || err) { + goto out_obj; + } switch ((*obj)->kind) { ''', disc_type = disc_type, @@ -386,6 +389,9 @@ void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **e out_obj: error_propagate(errp, err); err = NULL; + visit_end_union(m, !!(*obj)->data, &err); + error_propagate(errp, err); + err = NULL; } visit_end_struct(m, &err); out: diff --git a/target-arm/cpu.c b/target-arm/cpu.c index 407f977742..8ab6d9532e 100644 --- a/target-arm/cpu.c +++ b/target-arm/cpu.c @@ -41,7 +41,9 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value) static bool arm_cpu_has_work(CPUState *cs) { return cs->interrupt_request & - (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB); + (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD + | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ + | CPU_INTERRUPT_EXITTB); } static void cp_reg_reset(gpointer key, 
gpointer value, gpointer opaque) @@ -173,11 +175,6 @@ static void arm_cpu_reset(CPUState *s) set_float_detect_tininess(float_tininess_before_rounding, &env->vfp.standard_fp_status); tlb_flush(s, 1); - /* Reset is a state change for some CPUARMState fields which we - * bake assumptions about into translated code, so we need to - * tb_flush(). - */ - tb_flush(env); #ifndef CONFIG_USER_ONLY if (kvm_enabled()) { @@ -185,18 +182,17 @@ static void arm_cpu_reset(CPUState *s) } #endif + hw_breakpoint_update_all(cpu); hw_watchpoint_update_all(cpu); } bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { CPUClass *cc = CPU_GET_CLASS(cs); - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; bool ret = false; if (interrupt_request & CPU_INTERRUPT_FIQ - && !(env->daif & PSTATE_F)) { + && arm_excp_unmasked(cs, EXCP_FIQ)) { cs->exception_index = EXCP_FIQ; cc->do_interrupt(cs); ret = true; @@ -211,12 +207,23 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) We avoid this by disabling interrupts when pc contains a magic address. */ if (interrupt_request & CPU_INTERRUPT_HARD - && !(env->daif & PSTATE_I) - && (!IS_M(env) || env->regs[15] < 0xfffffff0)) { + && arm_excp_unmasked(cs, EXCP_IRQ)) { cs->exception_index = EXCP_IRQ; cc->do_interrupt(cs); ret = true; } + if (interrupt_request & CPU_INTERRUPT_VIRQ + && arm_excp_unmasked(cs, EXCP_VIRQ)) { + cs->exception_index = EXCP_VIRQ; + cc->do_interrupt(cs); + ret = true; + } + if (interrupt_request & CPU_INTERRUPT_VFIQ + && arm_excp_unmasked(cs, EXCP_VFIQ)) { + cs->exception_index = EXCP_VFIQ; + cc->do_interrupt(cs); + ret = true; + } return ret; } @@ -225,21 +232,29 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) static void arm_cpu_set_irq(void *opaque, int irq, int level) { ARMCPU *cpu = opaque; + CPUARMState *env = &cpu->env; CPUState *cs = CPU(cpu); + static const int mask[] = { + [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD, + [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ, + [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ, + [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ + }; switch (irq) { - case ARM_CPU_IRQ: - if (level) { - cpu_interrupt(cs, CPU_INTERRUPT_HARD); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + case ARM_CPU_VIRQ: + case ARM_CPU_VFIQ: + if (!arm_feature(env, ARM_FEATURE_EL2)) { + hw_error("%s: Virtual interrupt line %d with no EL2 support\n", + __func__, irq); } - break; + /* fall through */ + case ARM_CPU_IRQ: case ARM_CPU_FIQ: if (level) { - cpu_interrupt(cs, CPU_INTERRUPT_FIQ); + cpu_interrupt(cs, mask[irq]); } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_FIQ); + cpu_reset_interrupt(cs, mask[irq]); } break; default: @@ -289,9 +304,12 @@ static void arm_cpu_initfn(Object *obj) #ifndef CONFIG_USER_ONLY /* Our inbound IRQ and FIQ lines */ if (kvm_enabled()) { - qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 2); + /* VIRQ and VFIQ are unused with KVM but we add them to maintain + * the same interface as non-KVM CPUs. + */ + qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4); } else { - qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 2); + qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4); } cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, diff --git a/target-arm/cpu.h b/target-arm/cpu.h index d1e1ccb605..65a3417951 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -51,6 +51,11 @@ #define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */ #define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. 
*/ #define EXCP_STREX 10 +#define EXCP_HVC 11 /* HyperVisor Call */ +#define EXCP_HYP_TRAP 12 +#define EXCP_SMC 13 /* Secure Monitor Call */ +#define EXCP_VIRQ 14 +#define EXCP_VFIQ 15 #define ARMV7M_EXCP_RESET 1 #define ARMV7M_EXCP_NMI 2 @@ -65,6 +70,8 @@ /* ARM-specific interrupt pending bits. */ #define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1 +#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2 +#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3 /* The usual mapping for an AArch64 system register to its AArch32 * counterpart is for the 32 bit world to have access to the lower @@ -80,9 +87,11 @@ #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t)) #endif -/* Meanings of the ARMCPU object's two inbound GPIO lines */ +/* Meanings of the ARMCPU object's four inbound GPIO lines */ #define ARM_CPU_IRQ 0 #define ARM_CPU_FIQ 1 +#define ARM_CPU_VIRQ 2 +#define ARM_CPU_VFIQ 3 typedef void ARMWriteCPFunc(void *opaque, int cp_info, int srcreg, int operand, uint32_t value); @@ -172,7 +181,6 @@ typedef struct CPUARMState { uint64_t c1_sys; /* System control register. */ uint64_t c1_coproc; /* Coprocessor access register. */ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ - uint32_t c1_scr; /* secure config register. */ uint64_t ttbr0_el1; /* MMU translation table base 0. */ uint64_t ttbr1_el1; /* MMU translation table base 1. */ uint64_t c2_control; /* MMU translation table base control. */ @@ -184,6 +192,8 @@ typedef struct CPUARMState { MPU write buffer control. */ uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */ uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */ + uint64_t hcr_el2; /* Hypervisor configuration register */ + uint64_t scr_el3; /* Secure configuration register. */ uint32_t ifsr_el2; /* Fault status registers. */ uint64_t esr_el[4]; uint32_t c6_region[8]; /* MPU base/size registers. */ @@ -323,6 +333,7 @@ typedef struct CPUARMState { int eabi; #endif + struct CPUBreakpoint *cpu_breakpoint[16]; struct CPUWatchpoint *cpu_watchpoint[16]; CPU_COMMON @@ -498,6 +509,12 @@ void pmccntr_sync(CPUARMState *env); #define PSTATE_MODE_EL1t 4 #define PSTATE_MODE_EL0t 0 +/* Map EL and handler into a PSTATE_MODE. */ +static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) +{ + return (el << 2) | handler; +} + /* Return the current PSTATE value. For the moment we don't support 32<->64 bit * interprocessing, so we don't attempt to sync with the cpsr state used by * the 32 bit decoder. 
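As an aside on the cpu.h hunk above: together with the cpu.c change earlier in this diff (qdev_init_gpio_in(..., 4)), the CPU now exposes four named inbound lines instead of two. A minimal sketch of how board code might wire them, assuming a hypothetical interrupt-controller device gicdev and illustrative output indices (this snippet is not taken from the patch):

/* In a machine model's init code; cpu is an ARMCPU *, gicdev a DeviceState *. */
qdev_connect_gpio_out(gicdev, 0, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ));
qdev_connect_gpio_out(gicdev, 1, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ));
/* The virtual lines are new; arm_cpu_set_irq() calls hw_error() if they are
 * raised on a CPU without ARM_FEATURE_EL2, so only wire them when EL2 exists. */
qdev_connect_gpio_out(gicdev, 2, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_VIRQ));
qdev_connect_gpio_out(gicdev, 3, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_VFIQ));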
@@ -565,6 +582,58 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) } } +#define HCR_VM (1ULL << 0) +#define HCR_SWIO (1ULL << 1) +#define HCR_PTW (1ULL << 2) +#define HCR_FMO (1ULL << 3) +#define HCR_IMO (1ULL << 4) +#define HCR_AMO (1ULL << 5) +#define HCR_VF (1ULL << 6) +#define HCR_VI (1ULL << 7) +#define HCR_VSE (1ULL << 8) +#define HCR_FB (1ULL << 9) +#define HCR_BSU_MASK (3ULL << 10) +#define HCR_DC (1ULL << 12) +#define HCR_TWI (1ULL << 13) +#define HCR_TWE (1ULL << 14) +#define HCR_TID0 (1ULL << 15) +#define HCR_TID1 (1ULL << 16) +#define HCR_TID2 (1ULL << 17) +#define HCR_TID3 (1ULL << 18) +#define HCR_TSC (1ULL << 19) +#define HCR_TIDCP (1ULL << 20) +#define HCR_TACR (1ULL << 21) +#define HCR_TSW (1ULL << 22) +#define HCR_TPC (1ULL << 23) +#define HCR_TPU (1ULL << 24) +#define HCR_TTLB (1ULL << 25) +#define HCR_TVM (1ULL << 26) +#define HCR_TGE (1ULL << 27) +#define HCR_TDZ (1ULL << 28) +#define HCR_HCD (1ULL << 29) +#define HCR_TRVM (1ULL << 30) +#define HCR_RW (1ULL << 31) +#define HCR_CD (1ULL << 32) +#define HCR_ID (1ULL << 33) +#define HCR_MASK ((1ULL << 34) - 1) + +#define SCR_NS (1U << 0) +#define SCR_IRQ (1U << 1) +#define SCR_FIQ (1U << 2) +#define SCR_EA (1U << 3) +#define SCR_FW (1U << 4) +#define SCR_AW (1U << 5) +#define SCR_NET (1U << 6) +#define SCR_SMD (1U << 7) +#define SCR_HCE (1U << 8) +#define SCR_SIF (1U << 9) +#define SCR_RW (1U << 10) +#define SCR_ST (1U << 11) +#define SCR_TWI (1U << 12) +#define SCR_TWE (1U << 13) +#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST)) +#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET) + /* Return the current FPSCR value. */ uint32_t vfp_get_fpscr(CPUARMState *env); void vfp_set_fpscr(CPUARMState *env, uint32_t val); @@ -701,6 +770,7 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el) } void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf); +unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx); /* Interface between CPU and Interrupt controller. */ void armv7m_nvic_set_pending(void *opaque, int irq); @@ -1111,6 +1181,61 @@ bool write_cpustate_to_list(ARMCPU *cpu); # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif +static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx) +{ + CPUARMState *env = cs->env_ptr; + unsigned int cur_el = arm_current_pl(env); + unsigned int target_el = arm_excp_target_el(cs, excp_idx); + /* FIXME: Use actual secure state. */ + bool secure = false; + /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state. */ + bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2; + /* ARMv7-M interrupt return works by loading a magic value + * into the PC. On real hardware the load causes the + * return to occur. The qemu implementation performs the + * jump normally, then does the exception return when the + * CPU tries to execute code at the magic address. + * This will cause the magic PC value to be pushed to + * the stack if an interrupt occurred at the wrong time. + * We avoid this by disabling interrupts when + * pc contains a magic address. + */ + bool irq_unmasked = !(env->daif & PSTATE_I) + && (!IS_M(env) || env->regs[15] < 0xfffffff0); + + /* Don't take exceptions if they target a lower EL. 
*/ + if (cur_el > target_el) { + return false; + } + + switch (excp_idx) { + case EXCP_FIQ: + if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) { + return true; + } + return !(env->daif & PSTATE_F); + case EXCP_IRQ: + if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) { + return true; + } + return irq_unmasked; + case EXCP_VFIQ: + if (!secure && !(env->cp15.hcr_el2 & HCR_FMO)) { + /* VFIQs are only taken when hypervized and non-secure. */ + return false; + } + return !(env->daif & PSTATE_F); + case EXCP_VIRQ: + if (!secure && !(env->cp15.hcr_el2 & HCR_IMO)) { + /* VIRQs are only taken when hypervized and non-secure. */ + return false; + } + return irq_unmasked; + default: + g_assert_not_reached(); + } +} + static inline CPUARMState *cpu_init(const char *cpu_model) { ARMCPU *cpu = cpu_arm_init(cpu_model); @@ -1223,6 +1348,11 @@ static inline bool arm_singlestep_active(CPUARMState *env) #define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT) #define ARM_TBFLAG_PSTATE_SS_SHIFT 19 #define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT) +/* We store the bottom two bits of the CPAR as TB flags and handle + * checks on the other bits at runtime + */ +#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20 +#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT) /* Bit usage when in AArch64 state */ #define ARM_TBFLAG_AA64_EL_SHIFT 0 @@ -1257,6 +1387,8 @@ static inline bool arm_singlestep_active(CPUARMState *env) (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT) #define ARM_TBFLAG_PSTATE_SS(F) \ (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT) +#define ARM_TBFLAG_XSCALE_CPAR(F) \ + (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT) #define ARM_TBFLAG_AA64_EL(F) \ (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT) #define ARM_TBFLAG_AA64_FPEN(F) \ @@ -1334,6 +1466,8 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, *flags |= ARM_TBFLAG_PSTATE_SS_MASK; } } + *flags |= (extract32(env->cp15.c15_cpar, 0, 2) + << ARM_TBFLAG_XSCALE_CPAR_SHIFT); } *cs_base = 0; diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c index 2e9ef64786..8228e29486 100644 --- a/target-arm/helper-a64.c +++ b/target-arm/helper-a64.c @@ -443,10 +443,12 @@ void aarch64_cpu_do_interrupt(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; - target_ulong addr = env->cp15.vbar_el[1]; + unsigned int new_el = arm_excp_target_el(cs, cs->exception_index); + target_ulong addr = env->cp15.vbar_el[new_el]; + unsigned int new_mode = aarch64_pstate_mode(new_el, true); int i; - if (arm_current_pl(env) == 0) { + if (arm_current_pl(env) < new_el) { if (env->aarch64) { addr += 0x400; } else { @@ -464,23 +466,27 @@ void aarch64_cpu_do_interrupt(CPUState *cs) env->exception.syndrome); } - env->cp15.esr_el[1] = env->exception.syndrome; - env->cp15.far_el[1] = env->exception.vaddress; - switch (cs->exception_index) { case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: + env->cp15.far_el[new_el] = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", - env->cp15.far_el[1]); - break; + env->cp15.far_el[new_el]); + /* fall through */ case EXCP_BKPT: case EXCP_UDEF: case EXCP_SWI: + case EXCP_HVC: + case EXCP_HYP_TRAP: + case EXCP_SMC: + env->cp15.esr_el[new_el] = env->exception.syndrome; break; case EXCP_IRQ: + case EXCP_VIRQ: addr += 0x80; break; case EXCP_FIQ: + case EXCP_VFIQ: addr += 0x100; break; default: @@ -488,15 +494,15 @@ void aarch64_cpu_do_interrupt(CPUState *cs) } if 
(is_a64(env)) { - env->banked_spsr[aarch64_banked_spsr_index(1)] = pstate_read(env); + env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); aarch64_save_sp(env, arm_current_pl(env)); - env->elr_el[1] = env->pc; + env->elr_el[new_el] = env->pc; } else { env->banked_spsr[0] = cpsr_read(env); if (!env->thumb) { - env->cp15.esr_el[1] |= 1 << 25; + env->cp15.esr_el[new_el] |= 1 << 25; } - env->elr_el[1] = env->regs[15]; + env->elr_el[new_el] = env->regs[15]; for (i = 0; i < 15; i++) { env->xregs[i] = env->regs[i]; @@ -505,9 +511,9 @@ void aarch64_cpu_do_interrupt(CPUState *cs) env->condexec_bits = 0; } - pstate_write(env, PSTATE_DAIF | PSTATE_MODE_EL1h); + pstate_write(env, PSTATE_DAIF | new_mode); env->aarch64 = 1; - aarch64_restore_sp(env, 1); + aarch64_restore_sp(env, new_el); env->pc = addr; cs->interrupt_request |= CPU_INTERRUPT_EXITTB; diff --git a/target-arm/helper.c b/target-arm/helper.c index ece967397f..2669e15cb8 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -747,6 +747,32 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, raw_write(env, ri, value & ~0x1FULL); } +static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + /* We only mask off bits that are RES0 both for AArch64 and AArch32. + * For bits that vary between AArch32/64, code needs to check the + * current execution mode before directly using the feature bit. + */ + uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK; + + if (!arm_feature(env, ARM_FEATURE_EL2)) { + valid_mask &= ~SCR_HCE; + + /* On ARMv7, SMD (or SCD as it is called in v7) is only + * supported if EL2 exists. The bit is UNK/SBZP when + * EL2 is unavailable. In QEMU ARMv7, we force it to always zero + * when EL2 is unavailable. + */ + if (arm_feature(env, ARM_FEATURE_V7)) { + valid_mask &= ~SCR_SMD; + } + } + + /* Clear all-context RES0 bits. */ + value &= valid_mask; + raw_write(env, ri, value); +} + static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = arm_env_get_cpu(env); @@ -873,8 +899,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]), .resetvalue = 0 }, { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr), - .resetvalue = 0, }, + .access = PL1_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), + .resetvalue = 0, .writefn = scr_write }, { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE }, @@ -1714,12 +1740,7 @@ static const ARMCPRegInfo omap_cp_reginfo[] = { static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - value &= 0x3fff; - if (env->cp15.c15_cpar != value) { - /* Changes cp0 to cp13 behavior, so needs a TB flush. 
*/ - tb_flush(env); - env->cp15.c15_cpar = value; - } + env->cp15.c15_cpar = value & 0x3fff; } static const ARMCPRegInfo xscale_cp_reginfo[] = { @@ -2230,10 +2251,44 @@ static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, .access = PL2_RW, .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, + { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_NO_MIGRATE, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL2_RW, + .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, REGINFO_SENTINEL }; +static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + uint64_t valid_mask = HCR_MASK; + + if (arm_feature(env, ARM_FEATURE_EL3)) { + valid_mask &= ~HCR_HCD; + } else { + valid_mask &= ~HCR_TSC; + } + + /* Clear RES0 bits. */ + value &= valid_mask; + + /* These bits change the MMU setup: + * HCR_VM enables stage 2 translation + * HCR_PTW forbids certain page-table setups + * HCR_DC Disables stage1 and enables stage2 translation + */ + if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { + tlb_flush(CPU(cpu), 1); + } + raw_write(env, ri, value); +} + static const ARMCPRegInfo v8_el2_cp_reginfo[] = { + { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), + .writefn = hcr_write }, { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_NO_MIGRATE, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, @@ -2280,6 +2335,11 @@ static const ARMCPRegInfo v8_el3_cp_reginfo[] = { .access = PL3_RW, .writefn = vbar_write, .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), .resetvalue = 0 }, + { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_NO_MIGRATE, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), + .writefn = scr_write }, REGINFO_SENTINEL }; @@ -2492,6 +2552,124 @@ static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, hw_watchpoint_update(cpu, i); } +void hw_breakpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + uint64_t bvr = env->cp15.dbgbvr[n]; + uint64_t bcr = env->cp15.dbgbcr[n]; + vaddr addr; + int bt; + int flags = BP_CPU; + + if (env->cpu_breakpoint[n]) { + cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); + env->cpu_breakpoint[n] = NULL; + } + + if (!extract64(bcr, 0, 1)) { + /* E bit clear : watchpoint disabled */ + return; + } + + bt = extract64(bcr, 20, 4); + + switch (bt) { + case 4: /* unlinked address mismatch (reserved if AArch64) */ + case 5: /* linked address mismatch (reserved if AArch64) */ + qemu_log_mask(LOG_UNIMP, + "arm: address mismatch breakpoint types not implemented"); + return; + case 0: /* unlinked address match */ + case 1: /* linked address match */ + { + /* Bits [63:49] are hardwired to the value of bit [48]; that is, + * we behave as if the register was sign extended. Bits [1:0] are + * RES0. The BAS field is used to allow setting breakpoints on 16 + * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether + * a bp will fire if the addresses covered by the bp and the addresses + * covered by the insn overlap but the insn doesn't start at the + * start of the bp address range. We choose to require the insn and + * the bp to have the same address. 
The constraints on writing to + * BAS enforced in dbgbcr_write mean we have only four cases: + * 0b0000 => no breakpoint + * 0b0011 => breakpoint on addr + * 0b1100 => breakpoint on addr + 2 + * 0b1111 => breakpoint on addr + * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). + */ + int bas = extract64(bcr, 5, 4); + addr = sextract64(bvr, 0, 49) & ~3ULL; + if (bas == 0) { + return; + } + if (bas == 0xc) { + addr += 2; + } + break; + } + case 2: /* unlinked context ID match */ + case 8: /* unlinked VMID match (reserved if no EL2) */ + case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ + qemu_log_mask(LOG_UNIMP, + "arm: unlinked context breakpoint types not implemented"); + return; + case 9: /* linked VMID match (reserved if no EL2) */ + case 11: /* linked context ID and VMID match (reserved if no EL2) */ + case 3: /* linked context ID match */ + default: + /* We must generate no events for Linked context matches (unless + * they are linked to by some other bp/wp, which is handled in + * updates for the linking bp/wp). We choose to also generate no events + * for reserved values. + */ + return; + } + + cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); +} + +void hw_breakpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* Completely clear out existing QEMU breakpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { + hw_breakpoint_update(cpu, i); + } +} + +static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int i = ri->crm; + + /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only + * copy of BAS[0]. + */ + value = deposit64(value, 6, 1, extract64(value, 5, 1)); + value = deposit64(value, 8, 1, extract64(value, 7, 1)); + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + static void define_debug_regs(ARMCPU *cpu) { /* Define v7 and v8 architectural debug registers. @@ -2533,11 +2711,15 @@ static void define_debug_regs(ARMCPU *cpu) { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) }, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), + .writefn = dbgbvr_write, .raw_writefn = raw_write + }, { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) }, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), + .writefn = dbgbcr_write, .raw_writefn = raw_write + }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, dbgregs); @@ -3522,6 +3704,11 @@ uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) return 0; } +unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx) +{ + return 1; +} + #else /* Map CPU modes onto saved register banks. */ @@ -3577,6 +3764,57 @@ void switch_mode(CPUARMState *env, int mode) env->spsr = env->banked_spsr[i]; } +/* + * Determine the target EL for a given exception type. 
+ */ +unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + unsigned int cur_el = arm_current_pl(env); + unsigned int target_el; + /* FIXME: Use actual secure state. */ + bool secure = false; + + if (!env->aarch64) { + /* TODO: Add EL2 and 3 exception handling for AArch32. */ + return 1; + } + + switch (excp_idx) { + case EXCP_HVC: + case EXCP_HYP_TRAP: + target_el = 2; + break; + case EXCP_SMC: + target_el = 3; + break; + case EXCP_FIQ: + case EXCP_IRQ: + { + const uint64_t hcr_mask = excp_idx == EXCP_FIQ ? HCR_FMO : HCR_IMO; + const uint32_t scr_mask = excp_idx == EXCP_FIQ ? SCR_FIQ : SCR_IRQ; + + target_el = 1; + if (!secure && (env->cp15.hcr_el2 & hcr_mask)) { + target_el = 2; + } + if (env->cp15.scr_el3 & scr_mask) { + target_el = 3; + } + break; + } + case EXCP_VIRQ: + case EXCP_VFIQ: + target_el = 1; + break; + default: + target_el = MAX(cur_el, 1); + break; + } + return target_el; +} + static void v7m_push(CPUARMState *env, uint32_t val) { CPUState *cs = CPU(arm_env_get_cpu(env)); diff --git a/target-arm/helper.h b/target-arm/helper.h index 1d7003b70a..dec3728798 100644 --- a/target-arm/helper.h +++ b/target-arm/helper.h @@ -50,6 +50,8 @@ DEF_HELPER_2(exception_internal, void, env, i32) DEF_HELPER_3(exception_with_syndrome, void, env, i32, i32) DEF_HELPER_1(wfi, void, env) DEF_HELPER_1(wfe, void, env) +DEF_HELPER_1(pre_hvc, void, env) +DEF_HELPER_2(pre_smc, void, env, i32) DEF_HELPER_3(cpsr_write, void, env, i32, i32) DEF_HELPER_1(cpsr_read, i32, env) diff --git a/target-arm/internals.h b/target-arm/internals.h index 64751a0798..b7547bbb76 100644 --- a/target-arm/internals.h +++ b/target-arm/internals.h @@ -53,6 +53,11 @@ static const char * const excnames[] = { [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", [EXCP_STREX] = "QEMU intercept of STREX", + [EXCP_HVC] = "Hypervisor Call", + [EXCP_HYP_TRAP] = "Hypervisor Trap", + [EXCP_SMC] = "Secure Monitor Call", + [EXCP_VIRQ] = "Virtual IRQ", + [EXCP_VFIQ] = "Virtual FIQ", }; static inline void arm_log_exception(int idx) @@ -215,6 +220,16 @@ static inline uint32_t syn_aa64_svc(uint32_t imm16) return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); } +static inline uint32_t syn_aa64_hvc(uint32_t imm16) +{ + return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa64_smc(uint32_t imm16) +{ + return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb) { return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) @@ -313,6 +328,12 @@ static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr) | (cm << 8) | (wnr << 6) | 0x22; } +static inline uint32_t syn_breakpoint(int same_el) +{ + return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | ARM_EL_IL | 0x22; +} + /* Update a QEMU watchpoint based on the information the guest has set in the * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers. */ @@ -322,6 +343,15 @@ void hw_watchpoint_update(ARMCPU *cpu, int n); * suitable for use after migration or on reset. */ void hw_watchpoint_update_all(ARMCPU *cpu); +/* Update a QEMU breakpoint based on the information the guest has set in the + * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers. + */ +void hw_breakpoint_update(ARMCPU *cpu, int n); +/* Update the QEMU breakpoints for every guest breakpoint. 
This does a + * complete delete-and-reinstate of the QEMU breakpoint list and so is + * suitable for use after migration or on reset. + */ +void hw_breakpoint_update_all(ARMCPU *cpu); /* Callback function for when a watchpoint or breakpoint triggers. */ void arm_debug_excp_handler(CPUState *cs); diff --git a/target-arm/machine.c b/target-arm/machine.c index 8dfe87cb6b..ddb7d05c28 100644 --- a/target-arm/machine.c +++ b/target-arm/machine.c @@ -214,6 +214,7 @@ static int cpu_post_load(void *opaque, int version_id) } } + hw_breakpoint_update_all(cpu); hw_watchpoint_update_all(cpu); return 0; diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c index b956216c4b..03ac92afdc 100644 --- a/target-arm/op_helper.c +++ b/target-arm/op_helper.c @@ -301,6 +301,17 @@ void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val) void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome) { const ARMCPRegInfo *ri = rip; + + if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 + && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { + env->exception.syndrome = syndrome; + raise_exception(env, EXCP_UDEF); + } + + if (!ri->accessfn) { + return; + } + switch (ri->accessfn(env, ri)) { case CP_ACCESS_OK: return; @@ -374,6 +385,63 @@ void HELPER(clear_pstate_ss)(CPUARMState *env) env->pstate &= ~PSTATE_SS; } +void HELPER(pre_hvc)(CPUARMState *env) +{ + int cur_el = arm_current_pl(env); + /* FIXME: Use actual secure state. */ + bool secure = false; + bool undef; + + /* We've already checked that EL2 exists at translation time. + * EL3.HCE has priority over EL2.HCD. + */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + undef = !(env->cp15.scr_el3 & SCR_HCE); + } else { + undef = env->cp15.hcr_el2 & HCR_HCD; + } + + /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state. + * For ARMv8/AArch64, HVC is allowed in EL3. + * Note that we've already trapped HVC from EL0 at translation + * time. + */ + if (secure && (!is_a64(env) || cur_el == 1)) { + undef = true; + } + + if (undef) { + env->exception.syndrome = syn_uncategorized(); + raise_exception(env, EXCP_UDEF); + } +} + +void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) +{ + int cur_el = arm_current_pl(env); + /* FIXME: Use real secure state. */ + bool secure = false; + bool smd = env->cp15.scr_el3 & SCR_SMD; + /* On ARMv8 AArch32, SMD only applies to NS state. + * On ARMv7 SMD only applies to NS state and only if EL2 is available. + * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check + * the EL2 condition here. + */ + bool undef = is_a64(env) ? smd : (!secure && smd); + + /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */ + if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) { + env->exception.syndrome = syndrome; + raise_exception(env, EXCP_HYP_TRAP); + } + + /* We've already checked that EL3 exists at translation time. 
*/ + if (undef) { + env->exception.syndrome = syn_uncategorized(); + raise_exception(env, EXCP_UDEF); + } +} + void HELPER(exception_return)(CPUARMState *env) { int cur_el = arm_current_pl(env); @@ -511,32 +579,43 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn) return false; } -static bool wp_matches(ARMCPU *cpu, int n) +static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) { CPUARMState *env = &cpu->env; - uint64_t wcr = env->cp15.dbgwcr[n]; + uint64_t cr; int pac, hmc, ssc, wt, lbn; /* TODO: check against CPU security state when we implement TrustZone */ bool is_secure = false; - if (!env->cpu_watchpoint[n] - || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) { - return false; - } + if (is_wp) { + if (!env->cpu_watchpoint[n] + || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) { + return false; + } + cr = env->cp15.dbgwcr[n]; + } else { + uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; + if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) { + return false; + } + cr = env->cp15.dbgbcr[n]; + } /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is - * enabled and that the address and access type match; check the - * remaining fields, including linked breakpoints. - * Note that some combinations of {PAC, HMC SSC} are reserved and + * enabled and that the address and access type match; for breakpoints + * we know the address matched; check the remaining fields, including + * linked breakpoints. We rely on WCR and BCR having the same layout + * for the LBN, SSC, HMC, PAC/PMC and is-linked fields. + * Note that some combinations of {PAC, HMC, SSC} are reserved and * must act either like some valid combination or as if the watchpoint * were disabled. We choose the former, and use this together with * the fact that EL3 must always be Secure and EL2 must always be * Non-Secure to simplify the code slightly compared to the full * table in the ARM ARM. */ - pac = extract64(wcr, 1, 2); - hmc = extract64(wcr, 13, 1); - ssc = extract64(wcr, 14, 2); + pac = extract64(cr, 1, 2); + hmc = extract64(cr, 13, 1); + ssc = extract64(cr, 14, 2); switch (ssc) { case 0: @@ -560,6 +639,7 @@ static bool wp_matches(ARMCPU *cpu, int n) * Implementing this would require reworking the core watchpoint code * to plumb the mmu_idx through to this point. Luckily Linux does not * rely on this behaviour currently. + * For breakpoints we do want to use the current CPU state. */ switch (arm_current_pl(env)) { case 3: @@ -582,8 +662,8 @@ static bool wp_matches(ARMCPU *cpu, int n) g_assert_not_reached(); } - wt = extract64(wcr, 20, 1); - lbn = extract64(wcr, 16, 4); + wt = extract64(cr, 20, 1); + lbn = extract64(cr, 16, 4); if (wt && !linked_bp_matches(cpu, lbn)) { return false; @@ -606,7 +686,28 @@ static bool check_watchpoints(ARMCPU *cpu) } for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) { - if (wp_matches(cpu, n)) { + if (bp_wp_matches(cpu, n, true)) { + return true; + } + } + return false; +} + +static bool check_breakpoints(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + int n; + + /* If breakpoints are disabled globally or we can't take debug + * exceptions here then breakpoint firings are ignored. 
+ */ + if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 + || !arm_generate_debug_exceptions(env)) { + return false; + } + + for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) { + if (bp_wp_matches(cpu, n, false)) { return true; } } @@ -641,6 +742,18 @@ void arm_debug_excp_handler(CPUState *cs) cpu_resume_from_signal(cs, NULL); } } + } else { + if (check_breakpoints(cpu)) { + bool same_el = (arm_debug_target_el(env) == arm_current_pl(env)); + env->exception.syndrome = syn_breakpoint(same_el); + if (extended_addresses_enabled(env)) { + env->exception.fsr = (1 << 9) | 0x22; + } else { + env->exception.fsr = 0x2; + } + /* FAR is UNKNOWN, so doesn't need setting */ + raise_exception(env, EXCP_PREFETCH_ABORT); + } } } diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c index 8e66b6c972..35ae3ea281 100644 --- a/target-arm/translate-a64.c +++ b/target-arm/translate-a64.c @@ -1470,23 +1470,49 @@ static void disas_exc(DisasContext *s, uint32_t insn) int opc = extract32(insn, 21, 3); int op2_ll = extract32(insn, 0, 5); int imm16 = extract32(insn, 5, 16); + TCGv_i32 tmp; switch (opc) { case 0: - /* SVC, HVC, SMC; since we don't support the Virtualization - * or TrustZone extensions these all UNDEF except SVC. - */ - if (op2_ll != 1) { - unallocated_encoding(s); - break; - } /* For SVC, HVC and SMC we advance the single-step state * machine before taking the exception. This is architecturally * mandated, to ensure that single-stepping a system call * instruction works properly. */ - gen_ss_advance(s); - gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16)); + switch (op2_ll) { + case 1: + gen_ss_advance(s); + gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16)); + break; + case 2: + if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_pl == 0) { + unallocated_encoding(s); + break; + } + /* The pre HVC helper handles cases when HVC gets trapped + * as an undefined insn by runtime configuration. 
+ */ + gen_a64_set_pc_im(s->pc - 4); + gen_helper_pre_hvc(cpu_env); + gen_ss_advance(s); + gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16)); + break; + case 3: + if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->current_pl == 0) { + unallocated_encoding(s); + break; + } + gen_a64_set_pc_im(s->pc - 4); + tmp = tcg_const_i32(syn_aa64_smc(imm16)); + gen_helper_pre_smc(cpu_env, tmp); + tcg_temp_free_i32(tmp); + gen_ss_advance(s); + gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16)); + break; + default: + unallocated_encoding(s); + break; + } break; case 1: if (op2_ll != 0) { diff --git a/target-arm/translate.c b/target-arm/translate.c index 2c0b1deaea..8a2994fcb4 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -7001,22 +7001,18 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn) const ARMCPRegInfo *ri; cpnum = (insn >> 8) & 0xf; - if (arm_feature(env, ARM_FEATURE_XSCALE) - && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum))) - return 1; - - /* First check for coprocessor space used for actual instructions */ - switch (cpnum) { - case 0: - case 1: - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - return disas_iwmmxt_insn(env, s, insn); - } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { - return disas_dsp_insn(env, s, insn); - } - return 1; - default: - break; + + /* First check for coprocessor space used for XScale/iwMMXt insns */ + if (arm_feature(env, ARM_FEATURE_XSCALE) && (cpnum < 2)) { + if (extract32(s->c15_cpar, cpnum, 1) == 0) { + return 1; + } + if (arm_feature(env, ARM_FEATURE_IWMMXT)) { + return disas_iwmmxt_insn(env, s, insn); + } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { + return disas_dsp_insn(env, s, insn); + } + return 1; } /* Otherwise treat as a generic register access */ @@ -7049,9 +7045,12 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn) return 1; } - if (ri->accessfn) { + if (ri->accessfn || + (arm_feature(env, ARM_FEATURE_XSCALE) && cpnum < 14)) { /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. + * Note that on XScale all cp0..c13 registers do an access check + * call in order to handle c15_cpar. */ TCGv_ptr tmpptr; TCGv_i32 tcg_syn; @@ -7675,9 +7674,11 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) } else if ((insn & 0x0e000f00) == 0x0c000100) { if (arm_feature(env, ARM_FEATURE_IWMMXT)) { /* iWMMXt register transfer. */ - if (env->cp15.c15_cpar & (1 << 1)) - if (!disas_iwmmxt_insn(env, s, insn)) + if (extract32(s->c15_cpar, 1, 1)) { + if (!disas_iwmmxt_insn(env, s, insn)) { return; + } + } } } else if ((insn & 0x0fe00000) == 0x0c400000) { /* Coprocessor double register transfer. 
*/ @@ -10942,6 +10943,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu, dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); + dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags); dc->cp_regs = cpu->cp_regs; dc->current_pl = arm_current_pl(env); dc->features = env->features; diff --git a/target-arm/translate.h b/target-arm/translate.h index b90d27514d..85c6f9dcb2 100644 --- a/target-arm/translate.h +++ b/target-arm/translate.h @@ -52,6 +52,8 @@ typedef struct DisasContext { bool is_ldex; /* True if a single-step exception will be taken to the current EL */ bool ss_same_el; + /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ + int c15_cpar; #define TMP_A64_MAX 16 int tmp_a64_count; TCGv_i64 tmp_a64[TMP_A64_MAX]; diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c index 56dae66a3f..987c0bd4db 100644 --- a/tcg/aarch64/tcg-target.c +++ b/tcg/aarch64/tcg-target.c @@ -1007,7 +1007,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_adr(s, TCG_REG_X3, lb->raddr); tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); if (opc & MO_SIGN) { - tcg_out_sxt(s, TCG_TYPE_I64, size, lb->datalo_reg, TCG_REG_X0); + tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); } else { tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); } @@ -1032,7 +1032,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, - TCGReg data_reg, TCGReg addr_reg, + TCGType ext, TCGReg data_reg, TCGReg addr_reg, int mem_index, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { @@ -1040,6 +1040,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, label->is_ld = is_ld; label->opc = opc; + label->type = ext; label->datalo_reg = data_reg; label->addrlo_reg = addr_reg; label->mem_index = mem_index; @@ -1108,7 +1109,7 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits, #endif /* CONFIG_SOFTMMU */ -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, TCGReg data_r, TCGReg addr_r, TCGReg off_r) { const TCGMemOp bswap = memop & MO_BSWAP; @@ -1118,7 +1119,8 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r); break; case MO_SB: - tcg_out_ldst_r(s, I3312_LDRSBX, data_r, addr_r, off_r); + tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW, + data_r, addr_r, off_r); break; case MO_UW: tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); @@ -1130,9 +1132,10 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, if (bswap) { tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); tcg_out_rev16(s, data_r, data_r); - tcg_out_sxt(s, TCG_TYPE_I64, MO_16, data_r, data_r); + tcg_out_sxt(s, ext, MO_16, data_r, data_r); } else { - tcg_out_ldst_r(s, I3312_LDRSHX, data_r, addr_r, off_r); + tcg_out_ldst_r(s, ext ? 
I3312_LDRSHX : I3312_LDRSHW, + data_r, addr_r, off_r); } break; case MO_UL: @@ -1197,18 +1200,18 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, } static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOp memop, int mem_index) + TCGMemOp memop, TCGType ext, int mem_index) { #ifdef CONFIG_SOFTMMU TCGMemOp s_bits = memop & MO_SIZE; tcg_insn_unit *label_ptr; tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1); - tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_REG_X1); - add_qemu_ldst_label(s, true, memop, data_reg, addr_reg, + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1); + add_qemu_ldst_label(s, true, memop, ext, data_reg, addr_reg, mem_index, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ - tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); #endif /* CONFIG_SOFTMMU */ } @@ -1222,7 +1225,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0); tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1); - add_qemu_ldst_label(s, false, memop, data_reg, addr_reg, + add_qemu_ldst_label(s, false, memop, s_bits == MO_64, data_reg, addr_reg, mem_index, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, @@ -1515,7 +1518,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, a0, a1, a2, args[3]); + tcg_out_qemu_ld(s, a0, a1, a2, ext, args[3]); break; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i64: diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c index 40f2ec1027..0c4b028580 100644 --- a/tcg/sparc/tcg-target.c +++ b/tcg/sparc/tcg-target.c @@ -197,8 +197,8 @@ static const int tcg_target_call_oarg_regs[] = { #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03)) #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04)) #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14)) -#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08)) -#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c)) +#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08)) +#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c)) #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a)) #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b)) #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e)) @@ -209,6 +209,9 @@ static const int tcg_target_call_oarg_regs[] = { #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c)) #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f)) +#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11)) +#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16)) + #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25)) #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26)) #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27)) @@ -262,6 +265,10 @@ static const int tcg_target_call_oarg_regs[] = { #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE)) +#ifndef use_vis3_instructions +bool use_vis3_instructions; +#endif + static inline int check_fit_i64(int64_t val, unsigned int bits) { return val == sextract64(val, 0, bits); @@ -657,7 +664,7 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const) { - /* For 32-bit comparisons, we can play games with ADDX/SUBX. 
*/ + /* For 32-bit comparisons, we can play games with ADDC/SUBC. */ switch (cond) { case TCG_COND_LTU: case TCG_COND_GEU: @@ -668,9 +675,12 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, case TCG_COND_NE: /* For equality, we can transform to inequality vs zero. */ if (c2 != 0) { - tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR); + tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR); + c2 = TCG_REG_T1; + } else { + c2 = c1; } - c1 = TCG_REG_G0, c2 = ret, c2const = 0; + c1 = TCG_REG_G0, c2const = 0; cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU); break; @@ -698,15 +708,32 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, tcg_out_cmp(s, c1, c2, c2const); if (cond == TCG_COND_LTU) { - tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX); + tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC); } else { - tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX); + tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC); } } static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, int32_t c2, int c2const) { + if (use_vis3_instructions) { + switch (cond) { + case TCG_COND_NE: + if (c2 != 0) { + break; + } + c2 = c1, c2const = 0, c1 = TCG_REG_G0; + /* FALLTHRU */ + case TCG_COND_LTU: + tcg_out_cmp(s, c1, c2, c2const); + tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC); + return; + default: + break; + } + } + /* For 64-bit signed comparisons vs zero, we can avoid the compare if the input does not overlap the output. */ if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) { @@ -719,9 +746,9 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, } } -static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, - TCGReg al, TCGReg ah, int32_t bl, int blconst, - int32_t bh, int bhconst, int opl, int oph) +static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh, + TCGReg al, TCGReg ah, int32_t bl, int blconst, + int32_t bh, int bhconst, int opl, int oph) { TCGReg tmp = TCG_REG_T1; @@ -735,6 +762,54 @@ static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, tcg_out_mov(s, TCG_TYPE_I32, rl, tmp); } +static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, + TCGReg al, TCGReg ah, int32_t bl, int blconst, + int32_t bh, int bhconst, bool is_sub) +{ + TCGReg tmp = TCG_REG_T1; + + /* Note that the low parts are fully consumed before tmp is set. */ + if (rl != ah && (bhconst || rl != bh)) { + tmp = rl; + } + + tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC); + + if (use_vis3_instructions && !is_sub) { + /* Note that ADDXC doesn't accept immediates. */ + if (bhconst && bh != 0) { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh); + bh = TCG_REG_T2; + } + tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); + } else if (bh == TCG_REG_G0) { + /* If we have a zero, we can perform the operation in two insns, + with the arithmetic first, and a conditional move into place. */ + if (rh == ah) { + tcg_out_arithi(s, TCG_REG_T2, ah, 1, + is_sub ? ARITH_SUB : ARITH_ADD); + tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0); + } else { + tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD); + tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); + } + } else { + /* Otherwise adjust BH as if there is carry into T2 ... */ + if (bhconst) { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1)); + } else { + tcg_out_arithi(s, TCG_REG_T2, bh, 1, + is_sub ? ARITH_SUB : ARITH_ADD); + } + /* ... 
smoosh T2 back to original BH if carry is clear ... */ + tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst); + /* ... and finally perform the arithmetic with the new operand. */ + tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD); + } + + tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); +} + static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest) { ptrdiff_t disp = tcg_pcrel_diff(s, dest); @@ -1264,12 +1339,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_add2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4], - args[5], const_args[5], ARITH_ADDCC, ARITH_ADDX); + tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], + args[4], const_args[4], args[5], const_args[5], + ARITH_ADDCC, ARITH_ADDC); break; case INDEX_op_sub2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4], - args[5], const_args[5], ARITH_SUBCC, ARITH_SUBX); + tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], + args[4], const_args[4], args[5], const_args[5], + ARITH_SUBCC, ARITH_SUBC); break; case INDEX_op_mulu2_i32: c = ARITH_UMUL; @@ -1351,6 +1428,17 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_movcond_i64: tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); break; + case INDEX_op_add2_i64: + tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], + const_args[4], args[5], const_args[5], false); + break; + case INDEX_op_sub2_i64: + tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], + const_args[4], args[5], const_args[5], true); + break; + case INDEX_op_muluh_i64: + tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI); + break; gen_arith: tcg_out_arithc(s, a0, a1, a2, c2, c); @@ -1449,6 +1537,10 @@ static const TCGTargetOpDef sparc_op_defs[] = { { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } }, { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } }, + { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } }, + { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } }, + { INDEX_op_muluh_i64, { "R", "RZ", "RZ" } }, + { INDEX_op_qemu_ld_i32, { "r", "A" } }, { INDEX_op_qemu_ld_i64, { "R", "A" } }, { INDEX_op_qemu_st_i32, { "sZ", "A" } }, @@ -1459,6 +1551,15 @@ static const TCGTargetOpDef sparc_op_defs[] = { static void tcg_target_init(TCGContext *s) { + /* Only probe for the platform and capabilities if we haven't already + determined maximum values at compile time. 
*/ +#ifndef use_vis3_instructions + { + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0; + } +#endif + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64); diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h index 089f9761ca..0c4c8af0b2 100644 --- a/tcg/sparc/tcg-target.h +++ b/tcg/sparc/tcg-target.h @@ -85,6 +85,12 @@ typedef enum { #define TCG_TARGET_EXTEND_ARGS 1 #endif +#if defined(__VIS__) && __VIS__ >= 0x300 +#define use_vis3_instructions 1 +#else +extern bool use_vis3_instructions; +#endif + /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 0 @@ -133,11 +139,11 @@ typedef enum { #define TCG_TARGET_HAS_nor_i64 0 #define TCG_TARGET_HAS_deposit_i64 0 #define TCG_TARGET_HAS_movcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 0 -#define TCG_TARGET_HAS_sub2_i64 0 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 #define TCG_TARGET_HAS_mulu2_i64 0 #define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 0 +#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions #define TCG_TARGET_HAS_mulsh_i64 0 #define TCG_AREG0 TCG_REG_I0 diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h index 49b3de61ea..429cba24d4 100644 --- a/tcg/tcg-be-ldst.h +++ b/tcg/tcg-be-ldst.h @@ -24,8 +24,9 @@ #define TCG_MAX_QEMU_LDST 640 typedef struct TCGLabelQemuLdst { - bool is_ld:1; /* qemu_ld: true, qemu_st: false */ - TCGMemOp opc:4; + bool is_ld; /* qemu_ld: true, qemu_st: false */ + TCGMemOp opc; + TCGType type; /* result type of a load */ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ @@ -274,75 +274,54 @@ typedef enum TCGMemOp { typedef tcg_target_ulong TCGArg; -/* Define a type and accessor macros for variables. Using a struct is - nice because it gives some level of type safely. Ideally the compiler - be able to see through all this. However in practice this is not true, - especially on targets with braindamaged ABIs (e.g. i386). - We use plain int by default to avoid this runtime overhead. - Users of tcg_gen_* don't need to know about any of this, and should - treat TCGv as an opaque type. +/* Define a type and accessor macros for variables. Using pointer types + is nice because it gives some level of type safely. Converting to and + from intptr_t rather than int reduces the number of sign-extension + instructions that get implied on 64-bit hosts. Users of tcg_gen_* don't + need to know about any of this, and should treat TCGv as an opaque type. In addition we do typechecking for different types of variables. TCGv_i32 and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr - are aliases for target_ulong and host pointer sized values respectively. - */ + are aliases for target_ulong and host pointer sized values respectively. 
*/ -#ifdef CONFIG_DEBUG_TCG -#define DEBUG_TCGV 1 -#endif +typedef struct TCGv_i32_d *TCGv_i32; +typedef struct TCGv_i64_d *TCGv_i64; +typedef struct TCGv_ptr_d *TCGv_ptr; -#ifdef DEBUG_TCGV +static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i) +{ + return (TCGv_i32)i; +} -typedef struct +static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i) { - int i32; -} TCGv_i32; + return (TCGv_i64)i; +} -typedef struct +static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i) { - int i64; -} TCGv_i64; - -typedef struct { - int iptr; -} TCGv_ptr; - -#define MAKE_TCGV_I32(i) __extension__ \ - ({ TCGv_i32 make_tcgv_tmp = {i}; make_tcgv_tmp;}) -#define MAKE_TCGV_I64(i) __extension__ \ - ({ TCGv_i64 make_tcgv_tmp = {i}; make_tcgv_tmp;}) -#define MAKE_TCGV_PTR(i) __extension__ \ - ({ TCGv_ptr make_tcgv_tmp = {i}; make_tcgv_tmp; }) -#define GET_TCGV_I32(t) ((t).i32) -#define GET_TCGV_I64(t) ((t).i64) -#define GET_TCGV_PTR(t) ((t).iptr) -#if TCG_TARGET_REG_BITS == 32 -#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t)) -#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1) -#endif + return (TCGv_ptr)i; +} + +static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t) +{ + return (intptr_t)t; +} -#else /* !DEBUG_TCGV */ +static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t) +{ + return (intptr_t)t; +} -typedef int TCGv_i32; -typedef int TCGv_i64; -#if TCG_TARGET_REG_BITS == 32 -#define TCGv_ptr TCGv_i32 -#else -#define TCGv_ptr TCGv_i64 -#endif -#define MAKE_TCGV_I32(x) (x) -#define MAKE_TCGV_I64(x) (x) -#define MAKE_TCGV_PTR(x) (x) -#define GET_TCGV_I32(t) (t) -#define GET_TCGV_I64(t) (t) -#define GET_TCGV_PTR(t) (t) +static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t) +{ + return (intptr_t)t; +} #if TCG_TARGET_REG_BITS == 32 -#define TCGV_LOW(t) (t) -#define TCGV_HIGH(t) ((t) + 1) +#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t)) +#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1) #endif -#endif /* DEBUG_TCGV */ - #define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b)) #define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b)) #define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b)) diff --git a/tests/libqos/virtio.c b/tests/libqos/virtio.c index 9b6de2c0a7..a061289249 100644 --- a/tests/libqos/virtio.c +++ b/tests/libqos/virtio.c @@ -78,30 +78,54 @@ void qvirtio_set_driver_ok(const QVirtioBus *bus, QVirtioDevice *d) QVIRTIO_DRIVER_OK | QVIRTIO_DRIVER | QVIRTIO_ACKNOWLEDGE); } -bool qvirtio_wait_queue_isr(const QVirtioBus *bus, QVirtioDevice *d, - QVirtQueue *vq, uint64_t timeout) +void qvirtio_wait_queue_isr(const QVirtioBus *bus, QVirtioDevice *d, + QVirtQueue *vq, gint64 timeout_us) { - do { + gint64 start_time = g_get_monotonic_time(); + + for (;;) { clock_step(100); if (bus->get_queue_isr_status(d, vq)) { - break; /* It has ended */ + return; } - } while (--timeout); + g_assert(g_get_monotonic_time() - start_time <= timeout_us); + } +} + +/* Wait for the status byte at given guest memory address to be set + * + * The virtqueue interrupt must not be raised, making this useful for testing + * event_index functionality. 
+ */ +uint8_t qvirtio_wait_status_byte_no_isr(const QVirtioBus *bus, + QVirtioDevice *d, + QVirtQueue *vq, + uint64_t addr, + gint64 timeout_us) +{ + gint64 start_time = g_get_monotonic_time(); + uint8_t val; - return timeout != 0; + while ((val = readb(addr)) == 0xff) { + clock_step(100); + g_assert(!bus->get_queue_isr_status(d, vq)); + g_assert(g_get_monotonic_time() - start_time <= timeout_us); + } + return val; } -bool qvirtio_wait_config_isr(const QVirtioBus *bus, QVirtioDevice *d, - uint64_t timeout) +void qvirtio_wait_config_isr(const QVirtioBus *bus, QVirtioDevice *d, + gint64 timeout_us) { - do { + gint64 start_time = g_get_monotonic_time(); + + for (;;) { clock_step(100); if (bus->get_config_isr_status(d)) { - break; /* It has ended */ + return; } - } while (--timeout); - - return timeout != 0; + g_assert(g_get_monotonic_time() - start_time <= timeout_us); + } } void qvring_init(const QGuestAllocator *alloc, QVirtQueue *vq, uint64_t addr) diff --git a/tests/libqos/virtio.h b/tests/libqos/virtio.h index 70b3376360..29fbacbc99 100644 --- a/tests/libqos/virtio.h +++ b/tests/libqos/virtio.h @@ -160,10 +160,15 @@ void qvirtio_set_acknowledge(const QVirtioBus *bus, QVirtioDevice *d); void qvirtio_set_driver(const QVirtioBus *bus, QVirtioDevice *d); void qvirtio_set_driver_ok(const QVirtioBus *bus, QVirtioDevice *d); -bool qvirtio_wait_queue_isr(const QVirtioBus *bus, QVirtioDevice *d, - QVirtQueue *vq, uint64_t timeout); -bool qvirtio_wait_config_isr(const QVirtioBus *bus, QVirtioDevice *d, - uint64_t timeout); +void qvirtio_wait_queue_isr(const QVirtioBus *bus, QVirtioDevice *d, + QVirtQueue *vq, gint64 timeout_us); +uint8_t qvirtio_wait_status_byte_no_isr(const QVirtioBus *bus, + QVirtioDevice *d, + QVirtQueue *vq, + uint64_t addr, + gint64 timeout_us); +void qvirtio_wait_config_isr(const QVirtioBus *bus, QVirtioDevice *d, + gint64 timeout_us); QVirtQueue *qvirtqueue_setup(const QVirtioBus *bus, QVirtioDevice *d, QGuestAllocator *alloc, uint16_t index); diff --git a/tests/qapi-schema/qapi-schema-test.json b/tests/qapi-schema/qapi-schema-test.json index ab4d3d96b6..d43b5fd2e9 100644 --- a/tests/qapi-schema/qapi-schema-test.json +++ b/tests/qapi-schema/qapi-schema-test.json @@ -33,6 +33,9 @@ { 'type': 'UserDefB', 'data': { 'integer': 'int' } } +{ 'type': 'UserDefC', + 'data': { 'string1': 'str', 'string2': 'str' } } + { 'union': 'UserDefUnion', 'base': 'UserDefZero', 'data': { 'a' : 'UserDefA', 'b' : 'UserDefB' } } @@ -47,6 +50,13 @@ # FIXME generated struct UserDefFlatUnion has members for direct base # UserDefOne, but lacks members for indirect base UserDefZero +# this variant of UserDefFlatUnion defaults to a union that uses fields with +# allocated types to test corner cases in the cleanup/dealloc visitor +{ 'union': 'UserDefFlatUnion2', + 'base': 'UserDefUnionBase', + 'discriminator': 'enum1', + 'data': { 'value1' : 'UserDefC', 'value2' : 'UserDefB', 'value3' : 'UserDefA' } } + { 'union': 'UserDefAnonUnion', 'discriminator': {}, 'data': { 'uda': 'UserDefA', 's': 'str', 'i': 'int' } } diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out index 95e989925b..08d7304dfa 100644 --- a/tests/qapi-schema/qapi-schema-test.out +++ b/tests/qapi-schema/qapi-schema-test.out @@ -6,9 +6,11 @@ OrderedDict([('type', 'UserDefNested'), ('data', OrderedDict([('string0', 'str'), ('dict1', OrderedDict([('string1', 'str'), ('dict2', OrderedDict([('userdef1', 'UserDefOne'), ('string2', 'str')])), ('*dict3', OrderedDict([('userdef2', 'UserDefOne'), ('string3', 
'str')]))]))]))]), OrderedDict([('type', 'UserDefA'), ('data', OrderedDict([('boolean', 'bool')]))]), OrderedDict([('type', 'UserDefB'), ('data', OrderedDict([('integer', 'int')]))]), + OrderedDict([('type', 'UserDefC'), ('data', OrderedDict([('string1', 'str'), ('string2', 'str')]))]), OrderedDict([('union', 'UserDefUnion'), ('base', 'UserDefZero'), ('data', OrderedDict([('a', 'UserDefA'), ('b', 'UserDefB')]))]), OrderedDict([('type', 'UserDefUnionBase'), ('data', OrderedDict([('string', 'str'), ('enum1', 'EnumOne')]))]), OrderedDict([('union', 'UserDefFlatUnion'), ('base', 'UserDefUnionBase'), ('discriminator', 'enum1'), ('data', OrderedDict([('value1', 'UserDefA'), ('value2', 'UserDefB'), ('value3', 'UserDefB')]))]), + OrderedDict([('union', 'UserDefFlatUnion2'), ('base', 'UserDefUnionBase'), ('discriminator', 'enum1'), ('data', OrderedDict([('value1', 'UserDefC'), ('value2', 'UserDefB'), ('value3', 'UserDefA')]))]), OrderedDict([('union', 'UserDefAnonUnion'), ('discriminator', OrderedDict()), ('data', OrderedDict([('uda', 'UserDefA'), ('s', 'str'), ('i', 'int')]))]), OrderedDict([('union', 'UserDefNativeListUnion'), ('data', OrderedDict([('integer', ['int']), ('s8', ['int8']), ('s16', ['int16']), ('s32', ['int32']), ('s64', ['int64']), ('u8', ['uint8']), ('u16', ['uint16']), ('u32', ['uint32']), ('u64', ['uint64']), ('number', ['number']), ('boolean', ['bool']), ('string', ['str'])]))]), OrderedDict([('command', 'user_def_cmd'), ('data', OrderedDict())]), @@ -32,6 +34,7 @@ OrderedDict([('type', 'UserDefNested'), ('data', OrderedDict([('string0', 'str'), ('dict1', OrderedDict([('string1', 'str'), ('dict2', OrderedDict([('userdef1', 'UserDefOne'), ('string2', 'str')])), ('*dict3', OrderedDict([('userdef2', 'UserDefOne'), ('string3', 'str')]))]))]))]), OrderedDict([('type', 'UserDefA'), ('data', OrderedDict([('boolean', 'bool')]))]), OrderedDict([('type', 'UserDefB'), ('data', OrderedDict([('integer', 'int')]))]), + OrderedDict([('type', 'UserDefC'), ('data', OrderedDict([('string1', 'str'), ('string2', 'str')]))]), OrderedDict([('type', 'UserDefUnionBase'), ('data', OrderedDict([('string', 'str'), ('enum1', 'EnumOne')]))]), OrderedDict([('type', 'UserDefOptions'), ('data', OrderedDict([('*i64', ['int']), ('*u64', ['uint64']), ('*u16', ['uint16']), ('*i64x', 'int'), ('*u64x', 'uint64')]))]), OrderedDict([('type', 'EventStructOne'), ('data', OrderedDict([('struct1', 'UserDefOne'), ('string', 'str'), ('*enum2', 'EnumOne')]))])] diff --git a/tests/qemu-iotests/087 b/tests/qemu-iotests/087 index 82c56b1394..d7454d13da 100755 --- a/tests/qemu-iotests/087 +++ b/tests/qemu-iotests/087 @@ -218,6 +218,23 @@ run_qemu <<EOF { "execute": "quit" } EOF +echo +echo === Missing driver === +echo + +_make_test_img -o encryption=on $size +run_qemu -S <<EOF +{ "execute": "qmp_capabilities" } +{ "execute": "blockdev-add", + "arguments": { + "options": { + "id": "disk" + } + } + } +{ "execute": "quit" } +EOF + # success, all done echo "*** done" rm -f $seq.full diff --git a/tests/qemu-iotests/087.out b/tests/qemu-iotests/087.out index 75a54e0c36..e8795b3a10 100644 --- a/tests/qemu-iotests/087.out +++ b/tests/qemu-iotests/087.out @@ -64,4 +64,17 @@ QMP_VERSION {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "ide1-cd0", "tray-open": true}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "floppy0", "tray-open": true}} + +=== Missing driver === + +Formatting 'TEST_DIR/t.IMGFMT', 
fmt=IMGFMT size=134217728 encryption=on +Testing: -S +QMP_VERSION +{"return": {}} +{"error": {"class": "GenericError", "desc": "Invalid parameter type for 'driver', expected: string"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN"} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "ide1-cd0", "tray-open": true}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "floppy0", "tray-open": true}} + *** done diff --git a/tests/test-qmp-input-strict.c b/tests/test-qmp-input-strict.c index 0f770034b1..d5360c6a87 100644 --- a/tests/test-qmp-input-strict.c +++ b/tests/test-qmp-input-strict.c @@ -260,6 +260,21 @@ static void test_validate_fail_union_flat(TestInputVisitorData *data, qapi_free_UserDefFlatUnion(tmp); } +static void test_validate_fail_union_flat_no_discrim(TestInputVisitorData *data, + const void *unused) +{ + UserDefFlatUnion2 *tmp = NULL; + Error *err = NULL; + Visitor *v; + + /* test situation where discriminator field ('enum1' here) is missing */ + v = validate_test_init(data, "{ 'string': 'c', 'string1': 'd', 'string2': 'e' }"); + + visit_type_UserDefFlatUnion2(v, &tmp, NULL, &err); + g_assert(err); + qapi_free_UserDefFlatUnion2(tmp); +} + static void test_validate_fail_union_anon(TestInputVisitorData *data, const void *unused) { @@ -310,6 +325,8 @@ int main(int argc, char **argv) &testdata, test_validate_fail_union); validate_test_add("/visitor/input-strict/fail/union-flat", &testdata, test_validate_fail_union_flat); + validate_test_add("/visitor/input-strict/fail/union-flat-no-discriminator", + &testdata, test_validate_fail_union_flat_no_discrim); validate_test_add("/visitor/input-strict/fail/union-anon", &testdata, test_validate_fail_union_anon); diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c index 588666cff1..5ce6e79757 100644 --- a/tests/virtio-blk-test.c +++ b/tests/virtio-blk-test.c @@ -41,7 +41,7 @@ #define QVIRTIO_BLK_T_GET_ID 8 #define TEST_IMAGE_SIZE (64 * 1024 * 1024) -#define QVIRTIO_BLK_TIMEOUT 100 +#define QVIRTIO_BLK_TIMEOUT_US (30 * 1000 * 1000) #define PCI_SLOT 0x04 #define PCI_FN 0x00 @@ -183,8 +183,8 @@ static void pci_basic(void) qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false); qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -205,8 +205,8 @@ static void pci_basic(void) qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -233,8 +233,8 @@ static void pci_basic(void) qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -256,8 +256,8 @@ static void pci_basic(void) qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - 
g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -329,8 +329,8 @@ static void pci_indirect(void) free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect); qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -354,8 +354,8 @@ static void pci_indirect(void) free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect); qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -396,8 +396,7 @@ static void pci_config(void) qmp("{ 'execute': 'block_resize', 'arguments': { 'device': 'drive0', " " 'size': %d } }", n_size); - g_assert(qvirtio_wait_config_isr(&qvirtio_pci, &dev->vdev, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_config_isr(&qvirtio_pci, &dev->vdev, QVIRTIO_BLK_TIMEOUT_US); capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr); g_assert_cmpint(capacity, ==, n_size / 512); @@ -452,8 +451,7 @@ static void pci_msix(void) qmp("{ 'execute': 'block_resize', 'arguments': { 'device': 'drive0', " " 'size': %d } }", n_size); - g_assert(qvirtio_wait_config_isr(&qvirtio_pci, &dev->vdev, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_config_isr(&qvirtio_pci, &dev->vdev, QVIRTIO_BLK_TIMEOUT_US); capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev, addr); g_assert_cmpint(capacity, ==, n_size / 512); @@ -473,8 +471,8 @@ static void pci_msix(void) qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false); qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -497,8 +495,8 @@ static void pci_msix(void) qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); @@ -574,8 +572,8 @@ static void pci_idx(void) qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false); qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); /* Write request */ req.type = QVIRTIO_BLK_T_OUT; @@ -595,10 +593,9 @@ static void pci_idx(void) qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); /* No notification expected */ - g_assert(!qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); - - status = readb(req_addr + 528); + status = qvirtio_wait_status_byte_no_isr(&qvirtio_pci, &dev->vdev, + &vqpci->vq, req_addr + 528, + QVIRTIO_BLK_TIMEOUT_US); 
g_assert_cmpint(status, ==, 0); guest_free(alloc, req_addr); @@ -619,8 +616,8 @@ static void pci_idx(void) qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head); - g_assert(qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, - QVIRTIO_BLK_TIMEOUT)); + qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq, + QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); diff --git a/ui/console.c b/ui/console.c index f819382b24..258af5dfff 100644 --- a/ui/console.c +++ b/ui/console.c @@ -1677,6 +1677,14 @@ DisplayState *init_displaystate(void) return display_state; } +void graphic_console_set_hwops(QemuConsole *con, + const GraphicHwOps *hw_ops, + void *opaque) +{ + con->hw_ops = hw_ops; + con->hw = opaque; +} + QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, const GraphicHwOps *hw_ops, void *opaque) @@ -1691,8 +1699,7 @@ QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, ds = get_alloc_displaystate(); trace_console_gfx_new(); s = new_console(ds, GRAPHIC_CONSOLE, head); - s->hw_ops = hw_ops; - s->hw = opaque; + graphic_console_set_hwops(s, hw_ops, opaque); if (dev) { object_property_set_link(OBJECT(s), OBJECT(dev), "device", &error_abort); diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c index 4a25585b2e..1eef590af5 100644 --- a/util/qemu-sockets.c +++ b/util/qemu-sockets.c @@ -159,7 +159,7 @@ int inet_listen_opts(QemuOpts *opts, int port_offset, Error **errp) slisten = qemu_socket(e->ai_family, e->ai_socktype, e->ai_protocol); if (slisten < 0) { if (!e->ai_next) { - error_set_errno(errp, errno, QERR_SOCKET_CREATE_FAILED); + error_setg_errno(errp, errno, "Failed to create socket"); } continue; } @@ -183,7 +183,7 @@ int inet_listen_opts(QemuOpts *opts, int port_offset, Error **errp) } if (p == port_max) { if (!e->ai_next) { - error_set_errno(errp, errno, QERR_SOCKET_BIND_FAILED); + error_setg_errno(errp, errno, "Failed to bind socket"); } } } @@ -194,7 +194,7 @@ int inet_listen_opts(QemuOpts *opts, int port_offset, Error **errp) listen: if (listen(slisten,1) != 0) { - error_set_errno(errp, errno, QERR_SOCKET_LISTEN_FAILED); + error_setg_errno(errp, errno, "Failed to listen on socket"); closesocket(slisten); freeaddrinfo(res); return -1; @@ -281,7 +281,7 @@ static int inet_connect_addr(struct addrinfo *addr, bool *in_progress, sock = qemu_socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol); if (sock < 0) { - error_set_errno(errp, errno, QERR_SOCKET_CREATE_FAILED); + error_setg_errno(errp, errno, "Failed to create socket"); return -1; } socket_set_fast_reuse(sock); @@ -302,7 +302,7 @@ static int inet_connect_addr(struct addrinfo *addr, bool *in_progress, connect_state); *in_progress = true; } else if (rc < 0) { - error_set_errno(errp, errno, QERR_SOCKET_CONNECT_FAILED); + error_setg_errno(errp, errno, "Failed to connect socket"); closesocket(sock); return -1; } @@ -466,20 +466,20 @@ int inet_dgram_opts(QemuOpts *opts, Error **errp) /* create socket */ sock = qemu_socket(peer->ai_family, peer->ai_socktype, peer->ai_protocol); if (sock < 0) { - error_set_errno(errp, errno, QERR_SOCKET_CREATE_FAILED); + error_setg_errno(errp, errno, "Failed to create socket"); goto err; } socket_set_fast_reuse(sock); /* bind socket */ if (bind(sock, local->ai_addr, local->ai_addrlen) < 0) { - error_set_errno(errp, errno, QERR_SOCKET_BIND_FAILED); + error_setg_errno(errp, errno, "Failed to bind socket"); goto err; } /* connect to peer */ if (connect(sock,peer->ai_addr,peer->ai_addrlen) < 0) { - 
error_set_errno(errp, errno, QERR_SOCKET_CONNECT_FAILED); + error_setg_errno(errp, errno, "Failed to connect socket"); goto err; } @@ -684,7 +684,7 @@ int unix_listen_opts(QemuOpts *opts, Error **errp) sock = qemu_socket(PF_UNIX, SOCK_STREAM, 0); if (sock < 0) { - error_set_errno(errp, errno, QERR_SOCKET_CREATE_FAILED); + error_setg_errno(errp, errno, "Failed to create socket"); return -1; } @@ -709,11 +709,11 @@ int unix_listen_opts(QemuOpts *opts, Error **errp) unlink(un.sun_path); if (bind(sock, (struct sockaddr*) &un, sizeof(un)) < 0) { - error_set_errno(errp, errno, QERR_SOCKET_BIND_FAILED); + error_setg_errno(errp, errno, "Failed to bind socket"); goto err; } if (listen(sock, 1) < 0) { - error_set_errno(errp, errno, QERR_SOCKET_LISTEN_FAILED); + error_setg_errno(errp, errno, "Failed to listen on socket"); goto err; } @@ -739,7 +739,7 @@ int unix_connect_opts(QemuOpts *opts, Error **errp, sock = qemu_socket(PF_UNIX, SOCK_STREAM, 0); if (sock < 0) { - error_set_errno(errp, errno, QERR_SOCKET_CREATE_FAILED); + error_setg_errno(errp, errno, "Failed to create socket"); return -1; } if (callback != NULL) { @@ -774,7 +774,7 @@ int unix_connect_opts(QemuOpts *opts, Error **errp, } if (rc < 0) { - error_set_errno(errp, -rc, QERR_SOCKET_CONNECT_FAILED); + error_setg_errno(errp, -rc, "Failed to connect socket"); close(sock); sock = -1; }
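
As an illustrative aside, not part of the patch itself: the SUBCC/ADDC games played by tcg_out_setcond_i32 and tcg_out_addsub2_i64 above rest on a single identity, namely that the borrow out of an unsigned 32-bit subtraction is set exactly when the first operand is below the second, which is what lets "addc %g0, 0, ret" turn an LTU comparison into a 0/1 result. The self-contained C sketch below merely checks that identity on the host; borrow_of_sub32 is a name invented for this sketch, not a QEMU helper.

/* Illustrative host-side sketch, not part of the patch: it only checks the
 * identity the SPARC backend exploits, i.e. that the borrow of an unsigned
 * 32-bit subtraction ("subcc a, b") is 1 exactly when a < b, so that
 * "addc %g0, 0, ret" materialises setcond LTU as 0 or 1. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t borrow_of_sub32(uint32_t a, uint32_t b)
{
    /* Subtract in 64 bits; bit 32 of the result is the borrow out of bit 31. */
    uint64_t diff = (uint64_t)a - (uint64_t)b;
    return (uint32_t)((diff >> 32) & 1);
}

int main(void)
{
    static const uint32_t tests[][2] = {
        { 1, 2 }, { 2, 1 }, { 0, 0 }, { 7, 7 }, { 0xffffffffu, 1 }
    };
    for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        uint32_t a = tests[i][0], b = tests[i][1];
        /* The borrow and the C comparison must agree for every pair. */
        assert(borrow_of_sub32(a, b) == (a < b ? 1u : 0u));
        printf("%" PRIu32 " < %" PRIu32 " -> %u\n", a, b, (unsigned)(a < b));
    }
    return 0;
}

Compiling and running this with any C compiler (for example, cc borrow.c && ./a.out) should print five comparison lines and trip no assertion, which is the same property the generated SPARC code depends on.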